Dataset schema (column name, type, and observed value range; nullable columns are marked):

| Column | Type | Range / Values |
|---|---|---|
| hexsha | string | length 40 to 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 to 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 to 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 to 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 to 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 to 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 to 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 to 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 to 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 to 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
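The last three columns are statistics derived from `content`. A minimal sketch of how such per-file statistics are typically computed (an assumption for illustration, not the dataset's exact extraction pipeline):

```python
def content_stats(content: str) -> dict:
    """Per-file statistics analogous to the derived columns above."""
    lines = content.splitlines() or [""]
    line_lengths = [len(line) for line in lines]
    alphanum = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": sum(line_lengths) / len(line_lengths),
        "max_line_length": max(line_lengths),
        "alphanum_fraction": alphanum / len(content) if content else 0.0,
    }

# Example on a two-line snippet:
print(content_stats("import os\nprint(os.getcwd())"))
```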

Row 1
- hexsha: 7b5f00385afd1218c29643068449bf1af12389e7
- size: 269 | ext: py | lang: Python
- max_stars: path redirink/insights/pagination.py, repo Egor4ik325/redirink, head 17ef85f48145ee6112f2fcbab60dcd9d65ba78bf, licenses ["MIT"], count null, event datetimes null / null
- max_issues: path redirink/insights/pagination.py, repo Egor4ik325/redirink, head 17ef85f48145ee6112f2fcbab60dcd9d65ba78bf, licenses ["MIT"], count null, event datetimes null / null
- max_forks: path redirink/insights/pagination.py, repo Egor4ik325/redirink, head 17ef85f48145ee6112f2fcbab60dcd9d65ba78bf, licenses ["MIT"], count 1, event datetimes 2021-12-31T00:46:31.000Z / 2021-12-31T00:46:31.000Z
- content:
from rest_framework import pagination
class InsightPagination(pagination.PageNumberPagination):
"""
    Page-number pagination for insight response data.
"""
page_size = 30
max_page_size = 30
page_query_param = "page"
page_size_query_param = "page_size"
- avg_line_length: 20.692308 | max_line_length: 57 | alphanum_fraction: 0.72119
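For context, a minimal sketch of how a pagination class like `InsightPagination` from Row 1 is typically wired into a Django REST Framework view. The `InsightViewSet` name, queryset, and serializer are hypothetical placeholders, not part of the repository above:

```python
from rest_framework import viewsets

from redirink.insights.pagination import InsightPagination


class InsightViewSet(viewsets.ReadOnlyModelViewSet):
    """Hypothetical viewset: clients page through results with ?page=<n>&page_size=<m>."""

    pagination_class = InsightPagination
    # queryset = Insight.objects.all()        # model omitted for brevity
    # serializer_class = InsightSerializer    # serializer omitted for brevity
```

Because `max_page_size` equals `page_size`, clients can lower the page size through the `page_size` query parameter but never raise it above 30.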

Row 2
- hexsha: 182df7613c8cdc5d4a724151ac50db0bd2e98c61
- size: 3,528 | ext: py | lang: Python
- max_stars: path tools/utils/skeleton_to_ntu_format.py, repo bruceyo/TSMF, head 80f0306d4aca697acf5776e792f4428035a6cd88, licenses ["BSD-2-Clause"], count 8, event datetimes 2021-03-01T03:12:03.000Z / 2021-12-14T12:45:09.000Z
- max_issues: path tools/utils/skeleton_to_ntu_format.py, repo bruceyo/TSMF, head 80f0306d4aca697acf5776e792f4428035a6cd88, licenses ["BSD-2-Clause"], count null, event datetimes null / null
- max_forks: path tools/utils/skeleton_to_ntu_format.py, repo bruceyo/TSMF, head 80f0306d4aca697acf5776e792f4428035a6cd88, licenses ["BSD-2-Clause"], count 3, event datetimes 2021-07-09T02:49:33.000Z / 2022-03-14T09:26:39.000Z
- content:
# -*- coding: utf-8 -*-
"""
Created on Thu May 7 13:52:08 2020
@author: bruce
"""
import os
import numpy as np
skeleton_folder = 'Z:/PKUMMD/data/PKU_Skeleton_Renew/'
label_folder = 'Z:/PKUMMD/Label/Train_Label_PKU_final/'
save_path = 'Z:/PKUMMD/data/skeleton_ntu/'
view_dic = {'L':'001','M':'002','R':'003'}
file_id = 2
file_view = 'L'
def array_to_skl_lines(skeleton_frame):
skeleton_lines = ''
for i in range(0,75,3):
        # One joint's (x, y, z) coordinates become a space-separated line.
        x = skeleton_frame[i:i+3]
        line = " ".join(str(a) for a in x)
skeleton_lines = skeleton_lines + line + '\n'
return skeleton_lines
def transfer_and_save(file_id, file_view):
sample_file = ''
file_id_=''
if file_id / 10 < 1:
file_id_ = '00' + str(file_id)
elif file_id / 100 < 1:
file_id_ = '0' + str(file_id)
else:
file_id_ = str(file_id)
sample_file = '0' + file_id_ + '-' + file_view + '.txt'
    # Skip samples whose label file is missing.
    if not os.path.exists(label_folder + sample_file):
        print(label_folder + sample_file + ' does not exist!')
        return
    else:
        print(label_folder + sample_file + ' exists!')
skl_list = np.loadtxt(label_folder+ sample_file, delimiter=",")
skl_file = skeleton_folder + sample_file
skeleton_frames = np.loadtxt(skl_file)
for i in range(0, skl_list.shape[0]):
sample_lines = ""
sample_length = int(skl_list[i][2] - skl_list[i][1]) + 1
sample_lines = str(sample_length) + '\n' # first lines, length of the action sample
skeleton_sample = skeleton_frames[int(skl_list[i][1])-1:int(skl_list[i][2]),:150]
for skl_id in range(0,skeleton_sample.shape[0]):
skeleton_frame = skeleton_sample[skl_id]
if np.sum(skeleton_frame[75:150])==0:
sample_lines = sample_lines + '1\n'
sample_lines = sample_lines + '6 1 0 0 1 1 0 -0.437266 -0.117168 2'
sample_lines = sample_lines + '\n25\n'
skeleton_lines = array_to_skl_lines(skeleton_frame[0:75])
sample_lines = sample_lines + skeleton_lines
else:
sample_lines = sample_lines + '2\n'
sample_lines = sample_lines + '6 1 0 0 1 1 0 -0.437266 -0.117168 2'
sample_lines = sample_lines + '\n25\n'
skeleton_lines_1 = array_to_skl_lines(skeleton_frame[0:75])
skeleton_lines_2 = array_to_skl_lines(skeleton_frame[75:150])
sample_lines = sample_lines + skeleton_lines_1 + skeleton_lines_2
class_id = int(skl_list[i][0])
class_id_ = ''
if class_id / 10 < 1:
class_id_ = '00' + str(class_id)
elif class_id / 100 < 1:
class_id_ = '0' + str(class_id)
else:
class_id_ = str(class_id)
index = ''
if (i+1) / 10 < 1:
index = '00' + str(i+1)
elif (i+1) / 100 < 1:
index = '0' + str(i+1)
else:
index = str(i+1)
view_id = view_dic[file_view]
save_name = 'F' + file_id_ + 'V' + view_id + 'C' + class_id_ + 'L' + index
with open(save_path + save_name + ".skeleton", "w") as text_file:
text_file.write(sample_lines)
for file_id in range(2,365):
for file_view in ['L', 'M', 'R']:
transfer_and_save(file_id, file_view)
- avg_line_length: 36 | max_line_length: 99 | alphanum_fraction: 0.55102
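Row 2's script writes one `.skeleton` file per labeled action segment, naming it from zero-padded file, view, class, and segment indices. A small sketch of the same naming scheme in isolation (the helper name `ntu_style_name` is ours, for illustration only):

```python
def ntu_style_name(file_id: int, file_view: str, class_id: int, segment_index: int) -> str:
    """Mirror the save_name pattern above: F<file>V<view>C<class>L<segment>."""
    view_dic = {'L': '001', 'M': '002', 'R': '003'}
    return (
        'F' + str(file_id).zfill(3)
        + 'V' + view_dic[file_view]
        + 'C' + str(class_id).zfill(3)
        + 'L' + str(segment_index).zfill(3)
    )

# e.g. file 2, left view, action class 7, first segment of that file:
print(ntu_style_name(2, 'L', 7, 1) + ".skeleton")  # -> F002V001C007L001.skeleton
```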

Row 3
- hexsha: eafcf58fcd51b58e372a5b34a15db0f55e9b0b7f
- size: 84,473 | ext: py | lang: Python
- max_stars: path pymc/tests/test_distributions_random.py, repo percevalve/pymc, head 05aa247957553f608d9690ff9f61240aa35b71f8, licenses ["Apache-2.0"], count null, event datetimes null / null
- max_issues: path pymc/tests/test_distributions_random.py, repo percevalve/pymc, head 05aa247957553f608d9690ff9f61240aa35b71f8, licenses ["Apache-2.0"], count null, event datetimes null / null
- max_forks: path pymc/tests/test_distributions_random.py, repo percevalve/pymc, head 05aa247957553f608d9690ff9f61240aa35b71f8, licenses ["Apache-2.0"], count null, event datetimes null / null
- content:
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import itertools
from typing import Callable, List, Optional
import aesara
import numpy as np
import numpy.random as nr
import numpy.testing as npt
import pytest
import scipy.stats as st
from numpy.testing import assert_almost_equal, assert_array_almost_equal
try:
from polyagamma import random_polyagamma
_polyagamma_not_installed = False
except ImportError: # pragma: no cover
_polyagamma_not_installed = True
def random_polyagamma(*args, **kwargs):
raise RuntimeError("polyagamma package is not installed!")
from aeppl.logprob import _logprob
from scipy.special import expit
import pymc as pm
from pymc.aesaraf import change_rv_size, floatX, intX
from pymc.distributions.continuous import get_tau_sigma, interpolated
from pymc.distributions.discrete import _OrderedLogistic, _OrderedProbit
from pymc.distributions.dist_math import clipped_beta_rvs
from pymc.distributions.logprob import logpt
from pymc.distributions.multivariate import _OrderedMultinomial, quaddist_matrix
from pymc.distributions.shape_utils import to_tuple
from pymc.tests.helpers import SeededTest, select_by_precision
from pymc.tests.test_distributions import (
Domain,
R,
RandomPdMatrix,
Rplus,
Simplex,
build_model,
product,
)
def pymc_random(
dist,
paramdomains,
ref_rand,
valuedomain=None,
size=10000,
alpha=0.05,
fails=10,
extra_args=None,
model_args=None,
):
if valuedomain is None:
valuedomain = Domain([0], edges=(None, None))
if model_args is None:
model_args = {}
model, param_vars = build_model(dist, valuedomain, paramdomains, extra_args)
model_dist = change_rv_size(model.named_vars["value"], size, expand=True)
pymc_rand = aesara.function([], model_dist)
domains = paramdomains.copy()
for pt in product(domains, n_samples=100):
pt = pm.Point(pt, model=model)
pt.update(model_args)
# Update the shared parameter variables in `param_vars`
for k, v in pt.items():
nv = param_vars.get(k, model.named_vars.get(k))
if nv.name in param_vars:
param_vars[nv.name].set_value(v)
p = alpha
# Allow KS test to fail (i.e., the samples be different)
# a certain number of times. Crude, but necessary.
f = fails
while p <= alpha and f > 0:
s0 = pymc_rand()
s1 = floatX(ref_rand(size=size, **pt))
_, p = st.ks_2samp(np.atleast_1d(s0).flatten(), np.atleast_1d(s1).flatten())
f -= 1
assert p > alpha, str(pt)
def pymc_random_discrete(
dist,
paramdomains,
valuedomain=None,
ref_rand=None,
size=100000,
alpha=0.05,
fails=20,
):
if valuedomain is None:
valuedomain = Domain([0], edges=(None, None))
model, param_vars = build_model(dist, valuedomain, paramdomains)
model_dist = change_rv_size(model.named_vars["value"], size, expand=True)
pymc_rand = aesara.function([], model_dist)
domains = paramdomains.copy()
for pt in product(domains, n_samples=100):
pt = pm.Point(pt, model=model)
p = alpha
# Update the shared parameter variables in `param_vars`
for k, v in pt.items():
nv = param_vars.get(k, model.named_vars.get(k))
if nv.name in param_vars:
param_vars[nv.name].set_value(v)
# Allow Chisq test to fail (i.e., the samples be different)
# a certain number of times.
f = fails
while p <= alpha and f > 0:
o = pymc_rand()
e = intX(ref_rand(size=size, **pt))
o = np.atleast_1d(o).flatten()
e = np.atleast_1d(e).flatten()
observed = dict(zip(*np.unique(o, return_counts=True)))
expected = dict(zip(*np.unique(e, return_counts=True)))
for e in expected.keys():
expected[e] = (observed.get(e, 0), expected[e])
k = np.array([v for v in expected.values()])
if np.all(k[:, 0] == k[:, 1]):
p = 1.0
else:
_, p = st.chisquare(k[:, 0], k[:, 1])
f -= 1
assert p > alpha, str(pt)
class BaseTestCases:
class BaseTestCase(SeededTest):
shape = 5
# the following are the default values of the distribution that take effect
# when the parametrized shape/size in the test case is None.
# For every distribution that defaults to non-scalar shapes they must be
# specified by the inheriting Test class. example: TestGaussianRandomWalk
default_shape = ()
default_size = ()
def setup_method(self, *args, **kwargs):
super().setup_method(*args, **kwargs)
self.model = pm.Model()
def get_random_variable(self, shape, with_vector_params=False, name=None):
"""Creates a RandomVariable of the parametrized distribution."""
if with_vector_params:
params = {
key: value * np.ones(self.shape, dtype=np.dtype(type(value)))
for key, value in self.params.items()
}
else:
params = self.params
if name is None:
name = self.distribution.__name__
with self.model:
try:
if shape is None:
# in the test case parametrization "None" means "no specified (default)"
return self.distribution(name, transform=None, **params)
else:
ndim_supp = self.distribution.rv_op.ndim_supp
if ndim_supp == 0:
size = shape
else:
size = shape[:-ndim_supp]
return self.distribution(name, size=size, transform=None, **params)
except TypeError:
if np.sum(np.atleast_1d(shape)) == 0:
pytest.skip("Timeseries must have positive shape")
raise
@staticmethod
def sample_random_variable(random_variable, size):
"""Draws samples from a RandomVariable."""
if size:
random_variable = change_rv_size(random_variable, size, expand=True)
return random_variable.eval()
@pytest.mark.parametrize("size", [None, (), 1, (1,), 5, (4, 5)], ids=str)
@pytest.mark.parametrize("shape", [None, ()], ids=str)
def test_scalar_distribution_shape(self, shape, size):
"""Draws samples of different [size] from a scalar [shape] RV."""
rv = self.get_random_variable(shape)
exp_shape = self.default_shape if shape is None else tuple(np.atleast_1d(shape))
exp_size = self.default_size if size is None else tuple(np.atleast_1d(size))
expected = exp_size + exp_shape
actual = np.shape(self.sample_random_variable(rv, size))
assert (
expected == actual
), f"Sample size {size} from {shape}-shaped RV had shape {actual}. Expected: {expected}"
# check that negative size raises an error
with pytest.raises(ValueError):
self.sample_random_variable(rv, size=-2)
with pytest.raises(ValueError):
self.sample_random_variable(rv, size=(3, -2))
@pytest.mark.parametrize("size", [None, ()], ids=str)
@pytest.mark.parametrize(
"shape", [None, (), (1,), (1, 1), (1, 2), (10, 11, 1), (9, 10, 2)], ids=str
)
def test_scalar_sample_shape(self, shape, size):
"""Draws samples of scalar [size] from a [shape] RV."""
rv = self.get_random_variable(shape)
exp_shape = self.default_shape if shape is None else tuple(np.atleast_1d(shape))
exp_size = self.default_size if size is None else tuple(np.atleast_1d(size))
expected = exp_size + exp_shape
actual = np.shape(self.sample_random_variable(rv, size))
assert (
expected == actual
), f"Sample size {size} from {shape}-shaped RV had shape {actual}. Expected: {expected}"
@pytest.mark.parametrize("size", [None, 3, (4, 5)], ids=str)
@pytest.mark.parametrize("shape", [None, 1, (10, 11, 1)], ids=str)
def test_vector_params(self, shape, size):
shape = self.shape
rv = self.get_random_variable(shape, with_vector_params=True)
exp_shape = self.default_shape if shape is None else tuple(np.atleast_1d(shape))
exp_size = self.default_size if size is None else tuple(np.atleast_1d(size))
expected = exp_size + exp_shape
actual = np.shape(self.sample_random_variable(rv, size))
assert (
expected == actual
), f"Sample size {size} from {shape}-shaped RV had shape {actual}. Expected: {expected}"
@pytest.mark.xfail(reason="This distribution has not been refactored for v4")
class TestGaussianRandomWalk(BaseTestCases.BaseTestCase):
distribution = pm.GaussianRandomWalk
params = {"mu": 1.0, "sigma": 1.0}
default_shape = (1,)
@pytest.mark.xfail(reason="This distribution has not been refactored for v4")
class TestZeroInflatedNegativeBinomial(BaseTestCases.BaseTestCase):
distribution = pm.ZeroInflatedNegativeBinomial
params = {"mu": 1.0, "alpha": 1.0, "psi": 0.3}
@pytest.mark.xfail(reason="This distribution has not been refactored for v4")
class TestZeroInflatedBinomial(BaseTestCases.BaseTestCase):
distribution = pm.ZeroInflatedBinomial
params = {"n": 10, "p": 0.6, "psi": 0.3}
class BaseTestDistribution(SeededTest):
"""
This class provides a base for tests that new RandomVariables are correctly
implemented, and that the mapping of parameters between the PyMC
Distribution and the respective RandomVariable is correct.
Three default tests are provided which check:
1. Expected inputs are passed to the `rv_op` by the `dist` `classmethod`,
via `check_pymc_params_match_rv_op`
2. Expected (exact) draws are being returned, via
`check_pymc_draws_match_reference`
3. Shape variable inference is correct, via `check_rv_size`
Each desired test must be referenced by name in `tests_to_run`, when
subclassing this distribution. Custom tests can be added to each class as
well. See `TestFlat` for an example.
Additional tests should be added for each optional parametrization of the
distribution. In this case it's enough to include the test
`check_pymc_params_match_rv_op` since only this differs.
Note on `check_rv_size` test:
Custom input sizes (and expected output shapes) can be defined for the
`check_rv_size` test, by adding the optional class attributes
`sizes_to_check` and `sizes_expected`:
```python
sizes_to_check = [None, (1), (2, 3)]
sizes_expected = [(3,), (1, 3), (2, 3, 3)]
tests_to_run = ["check_rv_size"]
```
This is usually needed for Multivariate distributions. You can see an
example in `TestDirichlet`
    Notes on the `check_pymc_draws_match_reference` test:
    The `check_pymc_draws_match_reference` is a very simple test for the
equality of draws from the `RandomVariable` and the exact same python
function, given the same inputs and random seed. A small number
(`size=15`) is checked. This is not supposed to be a test for the
correctness of the random generator. The latter kind of test
(if warranted) can be performed with the aid of `pymc_random` and
`pymc_random_discrete` methods in this file, which will perform an
expensive statistical comparison between the RandomVariable `rng_fn`
and a reference Python function. This kind of test only makes sense if
there is a good independent generator reference (i.e., not just the same
composition of numpy / scipy python calls that is done inside `rng_fn`).
Finally, when your `rng_fn` is doing something more than just calling a
    `numpy` or `scipy` method, you will need to set up an equivalent seeded
function with which to compare for the exact draws (instead of relying on
`seeded_[scipy|numpy]_distribution_builder`). You can find an example
in the `TestWeibull`, whose `rng_fn` returns
`beta * np.random.weibull(alpha, size=size)`.
"""
pymc_dist: Optional[Callable] = None
pymc_dist_params = dict()
reference_dist: Optional[Callable] = None
reference_dist_params = dict()
expected_rv_op_params = dict()
tests_to_run = []
size = 15
decimal = select_by_precision(float64=6, float32=3)
sizes_to_check: Optional[List] = None
sizes_expected: Optional[List] = None
repeated_params_shape = 5
def test_distribution(self):
self.validate_tests_list()
self._instantiate_pymc_rv()
if self.reference_dist is not None:
self.reference_dist_draws = self.reference_dist()(
size=self.size, **self.reference_dist_params
)
for check_name in self.tests_to_run:
getattr(self, check_name)()
def _instantiate_pymc_rv(self, dist_params=None):
params = dist_params if dist_params else self.pymc_dist_params
self.pymc_rv = self.pymc_dist.dist(
**params, size=self.size, rng=aesara.shared(self.get_random_state(reset=True))
)
def check_pymc_draws_match_reference(self):
        # would need to re-instantiate the RV to make sure the order of draws matches the reference distribution
# self._instantiate_pymc_rv()
assert_array_almost_equal(
self.pymc_rv.eval(), self.reference_dist_draws, decimal=self.decimal
)
def check_pymc_params_match_rv_op(self):
        aesara_dist_inputs = self.pymc_rv.get_parents()[0].inputs[3:]
        assert len(self.expected_rv_op_params) == len(aesara_dist_inputs)
        for (expected_name, expected_value), actual_variable in zip(
            self.expected_rv_op_params.items(), aesara_dist_inputs
):
assert_almost_equal(expected_value, actual_variable.eval(), decimal=self.decimal)
def check_rv_size(self):
# test sizes
sizes_to_check = self.sizes_to_check or [None, (), 1, (1,), 5, (4, 5), (2, 4, 2)]
sizes_expected = self.sizes_expected or [(), (), (1,), (1,), (5,), (4, 5), (2, 4, 2)]
for size, expected in zip(sizes_to_check, sizes_expected):
pymc_rv = self.pymc_dist.dist(**self.pymc_dist_params, size=size)
actual = tuple(pymc_rv.shape.eval())
assert actual == expected, f"size={size}, expected={expected}, actual={actual}"
# test multi-parameters sampling for univariate distributions (with univariate inputs)
if (
self.pymc_dist.rv_op.ndim_supp == 0
and self.pymc_dist.rv_op.ndims_params
and sum(self.pymc_dist.rv_op.ndims_params) == 0
):
params = {
k: p * np.ones(self.repeated_params_shape) for k, p in self.pymc_dist_params.items()
}
self._instantiate_pymc_rv(params)
sizes_to_check = [None, self.repeated_params_shape, (5, self.repeated_params_shape)]
sizes_expected = [
(self.repeated_params_shape,),
(self.repeated_params_shape,),
(5, self.repeated_params_shape),
]
for size, expected in zip(sizes_to_check, sizes_expected):
pymc_rv = self.pymc_dist.dist(**params, size=size)
actual = tuple(pymc_rv.shape.eval())
assert actual == expected
def validate_tests_list(self):
assert len(self.tests_to_run) == len(
set(self.tests_to_run)
), "There are duplicates in the list of tests_to_run"
def seeded_scipy_distribution_builder(dist_name: str) -> Callable:
return lambda self: functools.partial(
getattr(st, dist_name).rvs, random_state=self.get_random_state()
)
def seeded_numpy_distribution_builder(dist_name: str) -> Callable:
return lambda self: functools.partial(
getattr(np.random.RandomState, dist_name), self.get_random_state()
)
class TestFlat(BaseTestDistribution):
pymc_dist = pm.Flat
pymc_dist_params = {}
expected_rv_op_params = {}
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_rv_size",
"check_not_implemented",
]
def check_not_implemented(self):
with pytest.raises(NotImplementedError):
self.pymc_rv.eval()
class TestHalfFlat(BaseTestDistribution):
pymc_dist = pm.HalfFlat
pymc_dist_params = {}
expected_rv_op_params = {}
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_rv_size",
"check_not_implemented",
]
def check_not_implemented(self):
with pytest.raises(NotImplementedError):
self.pymc_rv.eval()
class TestDiscreteWeibull(BaseTestDistribution):
def discrete_weibul_rng_fn(self, size, q, beta, uniform_rng_fct):
return np.ceil(np.power(np.log(1 - uniform_rng_fct(size=size)) / np.log(q), 1.0 / beta)) - 1
def seeded_discrete_weibul_rng_fn(self):
uniform_rng_fct = functools.partial(
getattr(np.random.RandomState, "uniform"), self.get_random_state()
)
return functools.partial(self.discrete_weibul_rng_fn, uniform_rng_fct=uniform_rng_fct)
pymc_dist = pm.DiscreteWeibull
pymc_dist_params = {"q": 0.25, "beta": 2.0}
expected_rv_op_params = {"q": 0.25, "beta": 2.0}
reference_dist_params = {"q": 0.25, "beta": 2.0}
reference_dist = seeded_discrete_weibul_rng_fn
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestPareto(BaseTestDistribution):
pymc_dist = pm.Pareto
pymc_dist_params = {"alpha": 3.0, "m": 2.0}
expected_rv_op_params = {"alpha": 3.0, "m": 2.0}
reference_dist_params = {"b": 3.0, "scale": 2.0}
reference_dist = seeded_scipy_distribution_builder("pareto")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestLaplace(BaseTestDistribution):
pymc_dist = pm.Laplace
pymc_dist_params = {"mu": 0.0, "b": 1.0}
expected_rv_op_params = {"mu": 0.0, "b": 1.0}
reference_dist_params = {"loc": 0.0, "scale": 1.0}
reference_dist = seeded_scipy_distribution_builder("laplace")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestAsymmetricLaplace(BaseTestDistribution):
def asymmetriclaplace_rng_fn(self, b, kappa, mu, size, uniform_rng_fct):
u = uniform_rng_fct(size=size)
switch = kappa ** 2 / (1 + kappa ** 2)
non_positive_x = mu + kappa * np.log(u * (1 / switch)) / b
positive_x = mu - np.log((1 - u) * (1 + kappa ** 2)) / (kappa * b)
draws = non_positive_x * (u <= switch) + positive_x * (u > switch)
return draws
def seeded_asymmetriclaplace_rng_fn(self):
uniform_rng_fct = functools.partial(
getattr(np.random.RandomState, "uniform"), self.get_random_state()
)
return functools.partial(self.asymmetriclaplace_rng_fn, uniform_rng_fct=uniform_rng_fct)
pymc_dist = pm.AsymmetricLaplace
pymc_dist_params = {"b": 1.0, "kappa": 1.0, "mu": 0.0}
expected_rv_op_params = {"b": 1.0, "kappa": 1.0, "mu": 0.0}
reference_dist_params = {"b": 1.0, "kappa": 1.0, "mu": 0.0}
reference_dist = seeded_asymmetriclaplace_rng_fn
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestExGaussian(BaseTestDistribution):
def exgaussian_rng_fn(self, mu, sigma, nu, size, normal_rng_fct, exponential_rng_fct):
return normal_rng_fct(mu, sigma, size=size) + exponential_rng_fct(scale=nu, size=size)
def seeded_exgaussian_rng_fn(self):
normal_rng_fct = functools.partial(
getattr(np.random.RandomState, "normal"), self.get_random_state()
)
exponential_rng_fct = functools.partial(
getattr(np.random.RandomState, "exponential"), self.get_random_state()
)
return functools.partial(
self.exgaussian_rng_fn,
normal_rng_fct=normal_rng_fct,
exponential_rng_fct=exponential_rng_fct,
)
pymc_dist = pm.ExGaussian
pymc_dist_params = {"mu": 1.0, "sigma": 1.0, "nu": 1.0}
expected_rv_op_params = {"mu": 1.0, "sigma": 1.0, "nu": 1.0}
reference_dist_params = {"mu": 1.0, "sigma": 1.0, "nu": 1.0}
reference_dist = seeded_exgaussian_rng_fn
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestGumbel(BaseTestDistribution):
pymc_dist = pm.Gumbel
pymc_dist_params = {"mu": 1.5, "beta": 3.0}
expected_rv_op_params = {"mu": 1.5, "beta": 3.0}
reference_dist_params = {"loc": 1.5, "scale": 3.0}
reference_dist = seeded_scipy_distribution_builder("gumbel_r")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
]
class TestStudentT(BaseTestDistribution):
pymc_dist = pm.StudentT
pymc_dist_params = {"nu": 5.0, "mu": -1.0, "sigma": 2.0}
expected_rv_op_params = {"nu": 5.0, "mu": -1.0, "sigma": 2.0}
reference_dist_params = {"df": 5.0, "loc": -1.0, "scale": 2.0}
reference_dist = seeded_scipy_distribution_builder("t")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestMoyal(BaseTestDistribution):
pymc_dist = pm.Moyal
pymc_dist_params = {"mu": 0.0, "sigma": 1.0}
expected_rv_op_params = {"mu": 0.0, "sigma": 1.0}
reference_dist_params = {"loc": 0.0, "scale": 1.0}
reference_dist = seeded_scipy_distribution_builder("moyal")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestKumaraswamy(BaseTestDistribution):
def kumaraswamy_rng_fn(self, a, b, size, uniform_rng_fct):
return (1 - (1 - uniform_rng_fct(size=size)) ** (1 / b)) ** (1 / a)
def seeded_kumaraswamy_rng_fn(self):
uniform_rng_fct = functools.partial(
getattr(np.random.RandomState, "uniform"), self.get_random_state()
)
return functools.partial(self.kumaraswamy_rng_fn, uniform_rng_fct=uniform_rng_fct)
pymc_dist = pm.Kumaraswamy
pymc_dist_params = {"a": 1.0, "b": 1.0}
expected_rv_op_params = {"a": 1.0, "b": 1.0}
reference_dist_params = {"a": 1.0, "b": 1.0}
reference_dist = seeded_kumaraswamy_rng_fn
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestTruncatedNormal(BaseTestDistribution):
pymc_dist = pm.TruncatedNormal
lower, upper, mu, sigma = -2.0, 2.0, 0, 1.0
pymc_dist_params = {"mu": mu, "sigma": sigma, "lower": lower, "upper": upper}
expected_rv_op_params = {"mu": mu, "sigma": sigma, "lower": lower, "upper": upper}
reference_dist_params = {
"loc": mu,
"scale": sigma,
"a": (lower - mu) / sigma,
"b": (upper - mu) / sigma,
}
reference_dist = seeded_scipy_distribution_builder("truncnorm")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestTruncatedNormalTau(BaseTestDistribution):
pymc_dist = pm.TruncatedNormal
lower, upper, mu, tau = -2.0, 2.0, 0, 1.0
tau, sigma = get_tau_sigma(tau=tau, sigma=None)
pymc_dist_params = {"mu": mu, "tau": tau, "lower": lower, "upper": upper}
expected_rv_op_params = {"mu": mu, "sigma": sigma, "lower": lower, "upper": upper}
tests_to_run = [
"check_pymc_params_match_rv_op",
]
class TestTruncatedNormalLowerTau(BaseTestDistribution):
pymc_dist = pm.TruncatedNormal
lower, upper, mu, tau = -2.0, np.inf, 0, 1.0
tau, sigma = get_tau_sigma(tau=tau, sigma=None)
pymc_dist_params = {"mu": mu, "tau": tau, "lower": lower}
expected_rv_op_params = {"mu": mu, "sigma": sigma, "lower": lower, "upper": upper}
tests_to_run = [
"check_pymc_params_match_rv_op",
]
class TestTruncatedNormalUpperTau(BaseTestDistribution):
pymc_dist = pm.TruncatedNormal
lower, upper, mu, tau = -np.inf, 2.0, 0, 1.0
tau, sigma = get_tau_sigma(tau=tau, sigma=None)
pymc_dist_params = {"mu": mu, "tau": tau, "upper": upper}
expected_rv_op_params = {"mu": mu, "sigma": sigma, "lower": lower, "upper": upper}
tests_to_run = [
"check_pymc_params_match_rv_op",
]
class TestTruncatedNormalUpperArray(BaseTestDistribution):
pymc_dist = pm.TruncatedNormal
lower, upper, mu, tau = (
np.array([-np.inf, -np.inf]),
np.array([3, 2]),
np.array([0, 0]),
np.array(
[
1,
1,
]
),
)
size = (15, 2)
tau, sigma = get_tau_sigma(tau=tau, sigma=None)
pymc_dist_params = {"mu": mu, "tau": tau, "upper": upper}
expected_rv_op_params = {"mu": mu, "sigma": sigma, "lower": lower, "upper": upper}
tests_to_run = [
"check_pymc_params_match_rv_op",
]
class TestWald(BaseTestDistribution):
pymc_dist = pm.Wald
mu, lam, alpha = 1.0, 1.0, 0.0
mu_rv, lam_rv, phi_rv = pm.Wald.get_mu_lam_phi(mu=mu, lam=lam, phi=None)
pymc_dist_params = {"mu": mu, "lam": lam, "alpha": alpha}
expected_rv_op_params = {"mu": mu_rv, "lam": lam_rv, "alpha": alpha}
reference_dist_params = [mu, lam_rv]
reference_dist = seeded_numpy_distribution_builder("wald")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
def test_distribution(self):
self.validate_tests_list()
self._instantiate_pymc_rv()
if self.reference_dist is not None:
self.reference_dist_draws = self.reference_dist()(
*self.reference_dist_params, self.size
)
for check_name in self.tests_to_run:
getattr(self, check_name)()
def check_pymc_draws_match_reference(self):
assert_array_almost_equal(
self.pymc_rv.eval(), self.reference_dist_draws + self.alpha, decimal=self.decimal
)
class TestWaldMuPhi(BaseTestDistribution):
pymc_dist = pm.Wald
mu, phi, alpha = 1.0, 3.0, 0.0
mu_rv, lam_rv, phi_rv = pm.Wald.get_mu_lam_phi(mu=mu, lam=None, phi=phi)
pymc_dist_params = {"mu": mu, "phi": phi, "alpha": alpha}
expected_rv_op_params = {"mu": mu_rv, "lam": lam_rv, "alpha": alpha}
tests_to_run = [
"check_pymc_params_match_rv_op",
]
class TestSkewNormal(BaseTestDistribution):
pymc_dist = pm.SkewNormal
pymc_dist_params = {"mu": 0.0, "sigma": 1.0, "alpha": 5.0}
expected_rv_op_params = {"mu": 0.0, "sigma": 1.0, "alpha": 5.0}
reference_dist_params = {"loc": 0.0, "scale": 1.0, "a": 5.0}
reference_dist = seeded_scipy_distribution_builder("skewnorm")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestSkewNormalTau(BaseTestDistribution):
pymc_dist = pm.SkewNormal
tau, sigma = get_tau_sigma(tau=2.0)
pymc_dist_params = {"mu": 0.0, "tau": tau, "alpha": 5.0}
expected_rv_op_params = {"mu": 0.0, "sigma": sigma, "alpha": 5.0}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestRice(BaseTestDistribution):
pymc_dist = pm.Rice
b, sigma = 1, 2
pymc_dist_params = {"b": b, "sigma": sigma}
expected_rv_op_params = {"b": b, "sigma": sigma}
reference_dist_params = {"b": b, "scale": sigma}
reference_dist = seeded_scipy_distribution_builder("rice")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestRiceNu(BaseTestDistribution):
pymc_dist = pm.Rice
nu = sigma = 2
pymc_dist_params = {"nu": nu, "sigma": sigma}
expected_rv_op_params = {"b": nu / sigma, "sigma": sigma}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestStudentTLam(BaseTestDistribution):
pymc_dist = pm.StudentT
lam, sigma = get_tau_sigma(tau=2.0)
pymc_dist_params = {"nu": 5.0, "mu": -1.0, "lam": lam}
expected_rv_op_params = {"nu": 5.0, "mu": -1.0, "lam": sigma}
reference_dist_params = {"df": 5.0, "loc": -1.0, "scale": sigma}
reference_dist = seeded_scipy_distribution_builder("t")
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestNormal(BaseTestDistribution):
pymc_dist = pm.Normal
pymc_dist_params = {"mu": 5.0, "sigma": 10.0}
expected_rv_op_params = {"mu": 5.0, "sigma": 10.0}
reference_dist_params = {"loc": 5.0, "scale": 10.0}
size = 15
reference_dist = seeded_numpy_distribution_builder("normal")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestLogitNormal(BaseTestDistribution):
def logit_normal_rng_fn(self, rng, size, loc, scale):
return expit(st.norm.rvs(loc=loc, scale=scale, size=size, random_state=rng))
pymc_dist = pm.LogitNormal
pymc_dist_params = {"mu": 5.0, "sigma": 10.0}
expected_rv_op_params = {"mu": 5.0, "sigma": 10.0}
reference_dist_params = {"loc": 5.0, "scale": 10.0}
reference_dist = lambda self: functools.partial(
self.logit_normal_rng_fn, rng=self.get_random_state()
)
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestLogitNormalTau(BaseTestDistribution):
pymc_dist = pm.LogitNormal
tau, sigma = get_tau_sigma(tau=25.0)
pymc_dist_params = {"mu": 1.0, "tau": tau}
expected_rv_op_params = {"mu": 1.0, "sigma": sigma}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestNormalTau(BaseTestDistribution):
pymc_dist = pm.Normal
tau, sigma = get_tau_sigma(tau=25.0)
pymc_dist_params = {"mu": 1.0, "tau": tau}
expected_rv_op_params = {"mu": 1.0, "sigma": sigma}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestNormalSd(BaseTestDistribution):
pymc_dist = pm.Normal
pymc_dist_params = {"mu": 1.0, "sd": 5.0}
expected_rv_op_params = {"mu": 1.0, "sigma": 5.0}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestUniform(BaseTestDistribution):
pymc_dist = pm.Uniform
pymc_dist_params = {"lower": 0.5, "upper": 1.5}
expected_rv_op_params = {"lower": 0.5, "upper": 1.5}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestHalfNormal(BaseTestDistribution):
pymc_dist = pm.HalfNormal
pymc_dist_params = {"sigma": 10.0}
expected_rv_op_params = {"mean": 0, "sigma": 10.0}
reference_dist_params = {"loc": 0, "scale": 10.0}
reference_dist = seeded_scipy_distribution_builder("halfnorm")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
]
class TestHalfNormalTau(BaseTestDistribution):
pymc_dist = pm.Normal
tau, sigma = get_tau_sigma(tau=25.0)
pymc_dist_params = {"tau": tau}
expected_rv_op_params = {"mu": 0.0, "sigma": sigma}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestHalfNormalSd(BaseTestDistribution):
pymc_dist = pm.Normal
pymc_dist_params = {"sd": 5.0}
expected_rv_op_params = {"mu": 0.0, "sigma": 5.0}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestBeta(BaseTestDistribution):
pymc_dist = pm.Beta
pymc_dist_params = {"alpha": 2.0, "beta": 5.0}
expected_rv_op_params = {"alpha": 2.0, "beta": 5.0}
reference_dist_params = {"a": 2.0, "b": 5.0}
size = 15
reference_dist = lambda self: functools.partial(
clipped_beta_rvs, random_state=self.get_random_state()
)
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestBetaMuSigma(BaseTestDistribution):
pymc_dist = pm.Beta
pymc_dist_params = {"mu": 0.5, "sigma": 0.25}
expected_alpha, expected_beta = pm.Beta.get_alpha_beta(
mu=pymc_dist_params["mu"], sigma=pymc_dist_params["sigma"]
)
expected_rv_op_params = {"alpha": expected_alpha, "beta": expected_beta}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestExponential(BaseTestDistribution):
pymc_dist = pm.Exponential
pymc_dist_params = {"lam": 10.0}
expected_rv_op_params = {"mu": 1.0 / pymc_dist_params["lam"]}
reference_dist_params = {"scale": 1.0 / pymc_dist_params["lam"]}
reference_dist = seeded_numpy_distribution_builder("exponential")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
]
class TestCauchy(BaseTestDistribution):
pymc_dist = pm.Cauchy
pymc_dist_params = {"alpha": 2.0, "beta": 5.0}
expected_rv_op_params = {"alpha": 2.0, "beta": 5.0}
reference_dist_params = {"loc": 2.0, "scale": 5.0}
reference_dist = seeded_scipy_distribution_builder("cauchy")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
]
class TestHalfCauchy(BaseTestDistribution):
pymc_dist = pm.HalfCauchy
pymc_dist_params = {"beta": 5.0}
expected_rv_op_params = {"alpha": 0.0, "beta": 5.0}
reference_dist_params = {"loc": 0.0, "scale": 5.0}
reference_dist = seeded_scipy_distribution_builder("halfcauchy")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
]
class TestGamma(BaseTestDistribution):
pymc_dist = pm.Gamma
pymc_dist_params = {"alpha": 2.0, "beta": 5.0}
expected_rv_op_params = {"alpha": 2.0, "beta": 1 / 5.0}
reference_dist_params = {"shape": 2.0, "scale": 1 / 5.0}
reference_dist = seeded_numpy_distribution_builder("gamma")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
]
class TestGammaMuSigma(BaseTestDistribution):
pymc_dist = pm.Gamma
pymc_dist_params = {"mu": 0.5, "sigma": 0.25}
expected_alpha, expected_beta = pm.Gamma.get_alpha_beta(
mu=pymc_dist_params["mu"], sigma=pymc_dist_params["sigma"]
)
expected_rv_op_params = {"alpha": expected_alpha, "beta": 1 / expected_beta}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestInverseGamma(BaseTestDistribution):
pymc_dist = pm.InverseGamma
pymc_dist_params = {"alpha": 2.0, "beta": 5.0}
expected_rv_op_params = {"alpha": 2.0, "beta": 5.0}
reference_dist_params = {"a": 2.0, "scale": 5.0}
reference_dist = seeded_scipy_distribution_builder("invgamma")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
]
class TestInverseGammaMuSigma(BaseTestDistribution):
pymc_dist = pm.InverseGamma
pymc_dist_params = {"mu": 0.5, "sigma": 0.25}
expected_alpha, expected_beta = pm.InverseGamma._get_alpha_beta(
alpha=None,
beta=None,
mu=pymc_dist_params["mu"],
sigma=pymc_dist_params["sigma"],
)
expected_rv_op_params = {"alpha": expected_alpha, "beta": expected_beta}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestChiSquared(BaseTestDistribution):
pymc_dist = pm.ChiSquared
pymc_dist_params = {"nu": 2.0}
expected_rv_op_params = {"nu": 2.0}
reference_dist_params = {"df": 2.0}
reference_dist = seeded_numpy_distribution_builder("chisquare")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestBinomial(BaseTestDistribution):
pymc_dist = pm.Binomial
pymc_dist_params = {"n": 100, "p": 0.33}
expected_rv_op_params = {"n": 100, "p": 0.33}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestNegativeBinomial(BaseTestDistribution):
pymc_dist = pm.NegativeBinomial
pymc_dist_params = {"n": 100, "p": 0.33}
expected_rv_op_params = {"n": 100, "p": 0.33}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestNegativeBinomialMuSigma(BaseTestDistribution):
pymc_dist = pm.NegativeBinomial
pymc_dist_params = {"mu": 5.0, "alpha": 8.0}
expected_n, expected_p = pm.NegativeBinomial.get_n_p(
mu=pymc_dist_params["mu"],
alpha=pymc_dist_params["alpha"],
n=None,
p=None,
)
expected_rv_op_params = {"n": expected_n, "p": expected_p}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestBernoulli(BaseTestDistribution):
pymc_dist = pm.Bernoulli
pymc_dist_params = {"p": 0.33}
expected_rv_op_params = {"p": 0.33}
reference_dist_params = {"p": 0.33}
reference_dist = seeded_scipy_distribution_builder("bernoulli")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
]
class TestBernoulliLogitP(BaseTestDistribution):
pymc_dist = pm.Bernoulli
pymc_dist_params = {"logit_p": 1.0}
expected_rv_op_params = {"p": expit(1.0)}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestPoisson(BaseTestDistribution):
pymc_dist = pm.Poisson
pymc_dist_params = {"mu": 4.0}
expected_rv_op_params = {"mu": 4.0}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestMvNormalCov(BaseTestDistribution):
pymc_dist = pm.MvNormal
pymc_dist_params = {
"mu": np.array([1.0, 2.0]),
"cov": np.array([[2.0, 0.0], [0.0, 3.5]]),
}
expected_rv_op_params = {
"mu": np.array([1.0, 2.0]),
"cov": np.array([[2.0, 0.0], [0.0, 3.5]]),
}
sizes_to_check = [None, (1), (2, 3)]
sizes_expected = [(2,), (1, 2), (2, 3, 2)]
reference_dist_params = {
"mean": np.array([1.0, 2.0]),
"cov": np.array([[2.0, 0.0], [0.0, 3.5]]),
}
reference_dist = seeded_numpy_distribution_builder("multivariate_normal")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestMvNormalChol(BaseTestDistribution):
pymc_dist = pm.MvNormal
pymc_dist_params = {
"mu": np.array([1.0, 2.0]),
"chol": np.array([[2.0, 0.0], [0.0, 3.5]]),
}
expected_rv_op_params = {
"mu": np.array([1.0, 2.0]),
"cov": quaddist_matrix(chol=pymc_dist_params["chol"]).eval(),
}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestMvNormalTau(BaseTestDistribution):
pymc_dist = pm.MvNormal
pymc_dist_params = {
"mu": np.array([1.0, 2.0]),
"tau": np.array([[2.0, 0.0], [0.0, 3.5]]),
}
expected_rv_op_params = {
"mu": np.array([1.0, 2.0]),
"cov": quaddist_matrix(tau=pymc_dist_params["tau"]).eval(),
}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestMvStudentTCov(BaseTestDistribution):
def mvstudentt_rng_fn(self, size, nu, mu, cov, rng):
chi2_samples = rng.chisquare(nu, size=size)
mv_samples = rng.multivariate_normal(np.zeros_like(mu), cov, size=size)
return (mv_samples / np.sqrt(chi2_samples[:, None] / nu)) + mu
pymc_dist = pm.MvStudentT
pymc_dist_params = {
"nu": 5,
"mu": np.array([1.0, 2.0]),
"cov": np.array([[2.0, 0.0], [0.0, 3.5]]),
}
expected_rv_op_params = {
"nu": 5,
"mu": np.array([1.0, 2.0]),
"cov": np.array([[2.0, 0.0], [0.0, 3.5]]),
}
sizes_to_check = [None, (1), (2, 3)]
sizes_expected = [(2,), (1, 2), (2, 3, 2)]
reference_dist_params = {
"nu": 5,
"mu": np.array([1.0, 2.0]),
"cov": np.array([[2.0, 0.0], [0.0, 3.5]]),
}
reference_dist = lambda self: functools.partial(
self.mvstudentt_rng_fn, rng=self.get_random_state()
)
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestMvStudentTChol(BaseTestDistribution):
pymc_dist = pm.MvStudentT
pymc_dist_params = {
"nu": 5,
"mu": np.array([1.0, 2.0]),
"chol": np.array([[2.0, 0.0], [0.0, 3.5]]),
}
expected_rv_op_params = {
"nu": 5,
"mu": np.array([1.0, 2.0]),
"cov": quaddist_matrix(chol=pymc_dist_params["chol"]).eval(),
}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestMvStudentTTau(BaseTestDistribution):
pymc_dist = pm.MvStudentT
pymc_dist_params = {
"nu": 5,
"mu": np.array([1.0, 2.0]),
"tau": np.array([[2.0, 0.0], [0.0, 3.5]]),
}
expected_rv_op_params = {
"nu": 5,
"mu": np.array([1.0, 2.0]),
"cov": quaddist_matrix(tau=pymc_dist_params["tau"]).eval(),
}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestDirichlet(BaseTestDistribution):
pymc_dist = pm.Dirichlet
pymc_dist_params = {"a": np.array([1.0, 2.0])}
expected_rv_op_params = {"a": np.array([1.0, 2.0])}
sizes_to_check = [None, (1), (4,), (3, 4)]
sizes_expected = [(2,), (1, 2), (4, 2), (3, 4, 2)]
reference_dist_params = {"alpha": np.array([1.0, 2.0])}
reference_dist = seeded_numpy_distribution_builder("dirichlet")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestMultinomial(BaseTestDistribution):
pymc_dist = pm.Multinomial
pymc_dist_params = {"n": 85, "p": np.array([0.28, 0.62, 0.10])}
expected_rv_op_params = {"n": 85, "p": np.array([0.28, 0.62, 0.10])}
sizes_to_check = [None, (1), (4,), (3, 2)]
sizes_expected = [(3,), (1, 3), (4, 3), (3, 2, 3)]
reference_dist_params = {"n": 85, "pvals": np.array([0.28, 0.62, 0.10])}
reference_dist = seeded_numpy_distribution_builder("multinomial")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestDirichletMultinomial(BaseTestDistribution):
pymc_dist = pm.DirichletMultinomial
pymc_dist_params = {"n": 85, "a": np.array([1.0, 2.0, 1.5, 1.5])}
expected_rv_op_params = {"n": 85, "a": np.array([1.0, 2.0, 1.5, 1.5])}
sizes_to_check = [None, 1, (4,), (3, 4)]
sizes_expected = [(4,), (1, 4), (4, 4), (3, 4, 4)]
tests_to_run = [
"check_pymc_params_match_rv_op",
"test_random_draws",
"check_rv_size",
]
def test_random_draws(self):
default_rng = aesara.shared(np.random.default_rng(1234))
draws = pm.DirichletMultinomial.dist(
n=np.array([5, 100]),
a=np.array([[0.001, 0.001, 0.001, 1000], [1000, 1000, 0.001, 0.001]]),
size=(2, 3),
rng=default_rng,
).eval()
assert np.all(draws.sum(-1) == np.array([5, 100]))
assert np.all((draws.sum(-2)[:, :, 0] > 30) & (draws.sum(-2)[:, :, 0] <= 70))
assert np.all((draws.sum(-2)[:, :, 1] > 30) & (draws.sum(-2)[:, :, 1] <= 70))
assert np.all((draws.sum(-2)[:, :, 2] >= 0) & (draws.sum(-2)[:, :, 2] <= 2))
assert np.all((draws.sum(-2)[:, :, 3] > 3) & (draws.sum(-2)[:, :, 3] <= 5))
class TestDirichletMultinomial_1d_n_2d_a(BaseTestDistribution):
pymc_dist = pm.DirichletMultinomial
pymc_dist_params = {
"n": np.array([23, 29]),
"a": np.array([[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]]),
}
sizes_to_check = [None, 1, (4,), (3, 4)]
sizes_expected = [(2, 4), (1, 2, 4), (4, 2, 4), (3, 4, 2, 4)]
tests_to_run = ["check_rv_size"]
class TestCategorical(BaseTestDistribution):
pymc_dist = pm.Categorical
pymc_dist_params = {"p": np.array([0.28, 0.62, 0.10])}
expected_rv_op_params = {"p": np.array([0.28, 0.62, 0.10])}
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_rv_size",
]
class TestGeometric(BaseTestDistribution):
pymc_dist = pm.Geometric
pymc_dist_params = {"p": 0.9}
expected_rv_op_params = {"p": 0.9}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestHyperGeometric(BaseTestDistribution):
pymc_dist = pm.HyperGeometric
pymc_dist_params = {"N": 20, "k": 12, "n": 5}
expected_rv_op_params = {
"ngood": pymc_dist_params["k"],
"nbad": pymc_dist_params["N"] - pymc_dist_params["k"],
"nsample": pymc_dist_params["n"],
}
reference_dist_params = expected_rv_op_params
reference_dist = seeded_numpy_distribution_builder("hypergeometric")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
]
class TestLogistic(BaseTestDistribution):
pymc_dist = pm.Logistic
pymc_dist_params = {"mu": 1.0, "s": 2.0}
expected_rv_op_params = {"mu": 1.0, "s": 2.0}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestLogNormal(BaseTestDistribution):
pymc_dist = pm.LogNormal
pymc_dist_params = {"mu": 1.0, "sigma": 5.0}
expected_rv_op_params = {"mu": 1.0, "sigma": 5.0}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestLognormalTau(BaseTestDistribution):
pymc_dist = pm.Lognormal
tau, sigma = get_tau_sigma(tau=25.0)
pymc_dist_params = {"mu": 1.0, "tau": 25.0}
expected_rv_op_params = {"mu": 1.0, "sigma": sigma}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestLognormalSd(BaseTestDistribution):
pymc_dist = pm.Lognormal
pymc_dist_params = {"mu": 1.0, "sd": 5.0}
expected_rv_op_params = {"mu": 1.0, "sigma": 5.0}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestTriangular(BaseTestDistribution):
pymc_dist = pm.Triangular
pymc_dist_params = {"lower": 0, "upper": 1, "c": 0.5}
expected_rv_op_params = {"lower": 0, "c": 0.5, "upper": 1}
reference_dist_params = {"left": 0, "mode": 0.5, "right": 1}
reference_dist = seeded_numpy_distribution_builder("triangular")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
]
class TestVonMises(BaseTestDistribution):
pymc_dist = pm.VonMises
pymc_dist_params = {"mu": -2.1, "kappa": 5}
expected_rv_op_params = {"mu": -2.1, "kappa": 5}
tests_to_run = ["check_pymc_params_match_rv_op"]
class TestWeibull(BaseTestDistribution):
def weibull_rng_fn(self, size, alpha, beta, std_weibull_rng_fct):
return beta * std_weibull_rng_fct(alpha, size=size)
def seeded_weibul_rng_fn(self):
std_weibull_rng_fct = functools.partial(
getattr(np.random.RandomState, "weibull"), self.get_random_state()
)
return functools.partial(self.weibull_rng_fn, std_weibull_rng_fct=std_weibull_rng_fct)
pymc_dist = pm.Weibull
pymc_dist_params = {"alpha": 1.0, "beta": 2.0}
expected_rv_op_params = {"alpha": 1.0, "beta": 2.0}
reference_dist_params = {"alpha": 1.0, "beta": 2.0}
reference_dist = seeded_weibul_rng_fn
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestBetaBinomial(BaseTestDistribution):
pymc_dist = pm.BetaBinomial
pymc_dist_params = {"alpha": 2.0, "beta": 1.0, "n": 5}
expected_rv_op_params = {"n": 5, "alpha": 2.0, "beta": 1.0}
reference_dist_params = {"n": 5, "a": 2.0, "b": 1.0}
reference_dist = seeded_scipy_distribution_builder("betabinom")
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
@pytest.mark.skipif(
condition=_polyagamma_not_installed,
    reason="polyagamma package is not available/installed.",
)
class TestPolyaGamma(BaseTestDistribution):
def polyagamma_rng_fn(self, size, h, z, rng):
return random_polyagamma(h, z, size=size, random_state=rng._bit_generator)
pymc_dist = pm.PolyaGamma
pymc_dist_params = {"h": 1.0, "z": 0.0}
expected_rv_op_params = {"h": 1.0, "z": 0.0}
reference_dist_params = {"h": 1.0, "z": 0.0}
reference_dist = lambda self: functools.partial(
self.polyagamma_rng_fn, rng=self.get_random_state()
)
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestDiscreteUniform(BaseTestDistribution):
def discrete_uniform_rng_fn(self, size, lower, upper, rng):
return st.randint.rvs(lower, upper + 1, size=size, random_state=rng)
pymc_dist = pm.DiscreteUniform
pymc_dist_params = {"lower": -1, "upper": 9}
expected_rv_op_params = {"lower": -1, "upper": 9}
reference_dist_params = {"lower": -1, "upper": 9}
reference_dist = lambda self: functools.partial(
self.discrete_uniform_rng_fn, rng=self.get_random_state()
)
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestConstant(BaseTestDistribution):
def constant_rng_fn(self, size, c):
if size is None:
return c
return np.full(size, c)
pymc_dist = pm.Constant
pymc_dist_params = {"c": 3}
expected_rv_op_params = {"c": 3}
reference_dist_params = {"c": 3}
reference_dist = lambda self: self.constant_rng_fn
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestZeroInflatedPoisson(BaseTestDistribution):
def zero_inflated_poisson_rng_fn(self, size, psi, theta, poisson_rng_fct, random_rng_fct):
return poisson_rng_fct(theta, size=size) * (random_rng_fct(size=size) < psi)
def seeded_zero_inflated_poisson_rng_fn(self):
poisson_rng_fct = functools.partial(
getattr(np.random.RandomState, "poisson"), self.get_random_state()
)
random_rng_fct = functools.partial(
getattr(np.random.RandomState, "random"), self.get_random_state()
)
return functools.partial(
self.zero_inflated_poisson_rng_fn,
poisson_rng_fct=poisson_rng_fct,
random_rng_fct=random_rng_fct,
)
pymc_dist = pm.ZeroInflatedPoisson
pymc_dist_params = {"psi": 0.9, "theta": 4.0}
expected_rv_op_params = {"psi": 0.9, "theta": 4.0}
reference_dist_params = {"psi": 0.9, "theta": 4.0}
reference_dist = seeded_zero_inflated_poisson_rng_fn
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestZeroInflatedBinomial(BaseTestDistribution):
def zero_inflated_binomial_rng_fn(self, size, psi, n, p, binomial_rng_fct, random_rng_fct):
return binomial_rng_fct(n, p, size=size) * (random_rng_fct(size=size) < psi)
def seeded_zero_inflated_binomial_rng_fn(self):
binomial_rng_fct = functools.partial(
getattr(np.random.RandomState, "binomial"), self.get_random_state()
)
random_rng_fct = functools.partial(
getattr(np.random.RandomState, "random"), self.get_random_state()
)
return functools.partial(
self.zero_inflated_binomial_rng_fn,
binomial_rng_fct=binomial_rng_fct,
random_rng_fct=random_rng_fct,
)
pymc_dist = pm.ZeroInflatedBinomial
pymc_dist_params = {"psi": 0.9, "n": 12, "p": 0.7}
expected_rv_op_params = {"psi": 0.9, "n": 12, "p": 0.7}
reference_dist_params = {"psi": 0.9, "n": 12, "p": 0.7}
reference_dist = seeded_zero_inflated_binomial_rng_fn
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestZeroInflatedNegativeBinomial(BaseTestDistribution):
def zero_inflated_negbinomial_rng_fn(
self, size, psi, n, p, negbinomial_rng_fct, random_rng_fct
):
return negbinomial_rng_fct(n, p, size=size) * (random_rng_fct(size=size) < psi)
def seeded_zero_inflated_negbinomial_rng_fn(self):
negbinomial_rng_fct = functools.partial(
getattr(np.random.RandomState, "negative_binomial"), self.get_random_state()
)
random_rng_fct = functools.partial(
getattr(np.random.RandomState, "random"), self.get_random_state()
)
return functools.partial(
self.zero_inflated_negbinomial_rng_fn,
negbinomial_rng_fct=negbinomial_rng_fct,
random_rng_fct=random_rng_fct,
)
n, p = pm.NegativeBinomial.get_n_p(mu=3, alpha=5)
pymc_dist = pm.ZeroInflatedNegativeBinomial
pymc_dist_params = {"psi": 0.9, "mu": 3, "alpha": 5}
expected_rv_op_params = {"psi": 0.9, "n": n, "p": p}
reference_dist_params = {"psi": 0.9, "n": n, "p": p}
reference_dist = seeded_zero_inflated_negbinomial_rng_fn
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestOrderedLogistic(BaseTestDistribution):
pymc_dist = _OrderedLogistic
pymc_dist_params = {"eta": 0, "cutpoints": np.array([-2, 0, 2])}
expected_rv_op_params = {"p": np.array([0.11920292, 0.38079708, 0.38079708, 0.11920292])}
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_rv_size",
]
class TestOrderedProbit(BaseTestDistribution):
pymc_dist = _OrderedProbit
pymc_dist_params = {"eta": 0, "cutpoints": np.array([-2, 0, 2])}
expected_rv_op_params = {"p": np.array([0.02275013, 0.47724987, 0.47724987, 0.02275013])}
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_rv_size",
]
class TestOrderedMultinomial(BaseTestDistribution):
pymc_dist = _OrderedMultinomial
pymc_dist_params = {"eta": 0, "cutpoints": np.array([-2, 0, 2]), "n": 1000}
sizes_to_check = [None, (1), (4,), (3, 2)]
sizes_expected = [(4,), (1, 4), (4, 4), (3, 2, 4)]
expected_rv_op_params = {
"n": 1000,
"p": np.array([0.11920292, 0.38079708, 0.38079708, 0.11920292]),
}
tests_to_run = [
"check_pymc_params_match_rv_op",
"check_rv_size",
]
class TestWishart(BaseTestDistribution):
def wishart_rng_fn(self, size, nu, V, rng):
        return st.wishart.rvs(int(nu), V, size=size, random_state=rng)
pymc_dist = pm.Wishart
V = np.eye(3)
pymc_dist_params = {"nu": 4, "V": V}
reference_dist_params = {"nu": 4, "V": V}
expected_rv_op_params = {"nu": 4, "V": V}
sizes_to_check = [None, 1, (4, 5)]
sizes_expected = [
(3, 3),
(1, 3, 3),
(4, 5, 3, 3),
]
reference_dist = lambda self: functools.partial(
self.wishart_rng_fn, rng=self.get_random_state()
)
tests_to_run = [
"check_rv_size",
"check_pymc_params_match_rv_op",
"check_pymc_draws_match_reference",
]
class TestMatrixNormal(BaseTestDistribution):
pymc_dist = pm.MatrixNormal
mu = np.random.random((3, 3))
row_cov = np.eye(3)
col_cov = np.eye(3)
shape = None
size = None
pymc_dist_params = {"mu": mu, "rowcov": row_cov, "colcov": col_cov}
expected_rv_op_params = {"mu": mu, "rowcov": row_cov, "colcov": col_cov}
tests_to_run = ["check_pymc_params_match_rv_op", "test_matrix_normal", "test_errors"]
def test_matrix_normal(self):
delta = 0.05 # limit for KS p-value
n_fails = 10 # Allows the KS fails a certain number of times
def ref_rand(mu, rowcov, colcov):
return st.matrix_normal.rvs(mean=mu, rowcov=rowcov, colcov=colcov)
with pm.Model(rng_seeder=1):
matrixnormal = pm.MatrixNormal(
"matnormal",
mu=np.random.random((3, 3)),
rowcov=np.eye(3),
colcov=np.eye(3),
)
check = pm.sample_prior_predictive(n_fails, return_inferencedata=False)
ref_smp = ref_rand(mu=np.random.random((3, 3)), rowcov=np.eye(3), colcov=np.eye(3))
p, f = delta, n_fails
while p <= delta and f > 0:
matrixnormal_smp = check["matnormal"]
p = np.min(
[
st.ks_2samp(
np.atleast_1d(matrixnormal_smp).flatten(),
np.atleast_1d(ref_smp).flatten(),
)
]
)
f -= 1
assert p > delta
def test_errors(self):
msg = "MatrixNormal doesn't support size argument"
with pm.Model():
with pytest.raises(NotImplementedError, match=msg):
matrixnormal = pm.MatrixNormal(
"matnormal",
mu=np.random.random((3, 3)),
rowcov=np.eye(3),
colcov=np.eye(3),
size=15,
)
with pm.Model():
matrixnormal = pm.MatrixNormal(
"matnormal",
mu=np.random.random((3, 3)),
rowcov=np.eye(3),
colcov=np.eye(3),
)
with pytest.raises(TypeError):
logpt(matrixnormal, aesara.tensor.ones((3, 3, 3)))
with pm.Model():
with pytest.warns(FutureWarning):
matrixnormal = pm.MatrixNormal(
"matnormal",
mu=np.random.random((3, 3)),
rowcov=np.eye(3),
colcov=np.eye(3),
shape=15,
)
class TestInterpolated(BaseTestDistribution):
def interpolated_rng_fn(self, size, mu, sigma, rng):
return st.norm.rvs(loc=mu, scale=sigma, size=size)
pymc_dist = pm.Interpolated
# Dummy values for RV size testing
mu = sigma = 1
x_points = pdf_points = np.linspace(1, 100, 100)
pymc_dist_params = {"x_points": x_points, "pdf_points": pdf_points}
reference_dist_params = {"mu": mu, "sigma": sigma}
reference_dist = lambda self: functools.partial(
self.interpolated_rng_fn, rng=self.get_random_state()
)
tests_to_run = ["check_rv_size", "test_interpolated"]
def test_interpolated(self):
for mu in R.vals:
for sigma in Rplus.vals:
# pylint: disable=cell-var-from-loop
rng = self.get_random_state()
def ref_rand(size):
return st.norm.rvs(loc=mu, scale=sigma, size=size, random_state=rng)
class TestedInterpolated(pm.Interpolated):
rv_op = interpolated
@classmethod
def dist(cls, **kwargs):
x_points = np.linspace(mu - 5 * sigma, mu + 5 * sigma, 100)
pdf_points = st.norm.pdf(x_points, loc=mu, scale=sigma)
return super().dist(x_points=x_points, pdf_points=pdf_points, **kwargs)
pymc_random(
TestedInterpolated,
{},
extra_args={"rng": aesara.shared(rng)},
ref_rand=ref_rand,
)
class TestKroneckerNormal(BaseTestDistribution):
def kronecker_rng_fn(self, size, mu, covs=None, sigma=None, rng=None):
cov = pm.math.kronecker(covs[0], covs[1]).eval()
cov += sigma ** 2 * np.identity(cov.shape[0])
return st.multivariate_normal.rvs(mean=mu, cov=cov, size=size)
pymc_dist = pm.KroneckerNormal
n = 3
N = n ** 2
covs = [RandomPdMatrix(n), RandomPdMatrix(n)]
mu = np.random.random(N) * 0.1
sigma = 1
pymc_dist_params = {"mu": mu, "covs": covs, "sigma": sigma}
expected_rv_op_params = {"mu": mu, "covs": covs, "sigma": sigma}
reference_dist_params = {"mu": mu, "covs": covs, "sigma": sigma}
sizes_to_check = [None, (), 1, (1,), 5, (4, 5), (2, 4, 2)]
sizes_expected = [(N,), (N,), (1, N), (1, N), (5, N), (4, 5, N), (2, 4, 2, N)]
reference_dist = lambda self: functools.partial(
self.kronecker_rng_fn, rng=self.get_random_state()
)
tests_to_run = [
"check_pymc_draws_match_reference",
"check_rv_size",
]
class TestScalarParameterSamples(SeededTest):
@pytest.mark.xfail(reason="This distribution has not been refactored for v4")
def test_lkj(self):
for n in [2, 10, 50]:
# pylint: disable=cell-var-from-loop
shape = n * (n - 1) // 2
def ref_rand(size, eta):
beta = eta - 1 + n / 2
return (st.beta.rvs(size=(size, shape), a=beta, b=beta) - 0.5) * 2
class TestedLKJCorr(pm.LKJCorr):
def __init__(self, **kwargs):
kwargs.pop("shape", None)
super().__init__(n=n, **kwargs)
pymc_random(
TestedLKJCorr,
{"eta": Domain([1.0, 10.0, 100.0])},
size=10000 // n,
ref_rand=ref_rand,
)
@pytest.mark.xfail(reason="This distribution has not been refactored for v4")
def test_normalmixture(self):
def ref_rand(size, w, mu, sigma):
component = np.random.choice(w.size, size=size, p=w)
return np.random.normal(mu[component], sigma[component], size=size)
pymc_random(
pm.NormalMixture,
{
"w": Simplex(2),
"mu": Domain([[0.05, 2.5], [-5.0, 1.0]], edges=(None, None)),
"sigma": Domain([[1, 1], [1.5, 2.0]], edges=(None, None)),
},
extra_args={"comp_shape": 2},
size=1000,
ref_rand=ref_rand,
)
pymc_random(
pm.NormalMixture,
{
"w": Simplex(3),
"mu": Domain([[-5.0, 1.0, 2.5]], edges=(None, None)),
"sigma": Domain([[1.5, 2.0, 3.0]], edges=(None, None)),
},
extra_args={"comp_shape": 3},
size=1000,
ref_rand=ref_rand,
)
@pytest.mark.xfail(reason="This distribution has not been refactored for v4")
def test_mixture_random_shape():
# test the shape broadcasting in mixture random
y = np.concatenate([nr.poisson(5, size=10), nr.poisson(9, size=10)])
with pm.Model() as m:
comp0 = pm.Poisson.dist(mu=np.ones(2))
w0 = pm.Dirichlet("w0", a=np.ones(2), shape=(2,))
like0 = pm.Mixture("like0", w=w0, comp_dists=comp0, observed=y)
comp1 = pm.Poisson.dist(mu=np.ones((20, 2)), shape=(20, 2))
w1 = pm.Dirichlet("w1", a=np.ones(2), shape=(2,))
like1 = pm.Mixture("like1", w=w1, comp_dists=comp1, observed=y)
comp2 = pm.Poisson.dist(mu=np.ones(2))
w2 = pm.Dirichlet("w2", a=np.ones(2), shape=(20, 2))
like2 = pm.Mixture("like2", w=w2, comp_dists=comp2, observed=y)
comp3 = pm.Poisson.dist(mu=np.ones(2), shape=(20, 2))
w3 = pm.Dirichlet("w3", a=np.ones(2), shape=(20, 2))
like3 = pm.Mixture("like3", w=w3, comp_dists=comp3, observed=y)
# XXX: This needs to be refactored
rand0, rand1, rand2, rand3 = [None] * 4 # draw_values(
# [like0, like1, like2, like3], point=m.initial_point, size=100
# )
assert rand0.shape == (100, 20)
assert rand1.shape == (100, 20)
assert rand2.shape == (100, 20)
assert rand3.shape == (100, 20)
with m:
ppc = pm.sample_posterior_predictive([m.initial_point], samples=200)
assert ppc["like0"].shape == (200, 20)
assert ppc["like1"].shape == (200, 20)
assert ppc["like2"].shape == (200, 20)
assert ppc["like3"].shape == (200, 20)
@pytest.mark.xfail(reason="This distribution has not been refactored for v4")
def test_mixture_random_shape_fast():
# test the shape broadcasting in mixture random
y = np.concatenate([nr.poisson(5, size=10), nr.poisson(9, size=10)])
with pm.Model() as m:
comp0 = pm.Poisson.dist(mu=np.ones(2))
w0 = pm.Dirichlet("w0", a=np.ones(2), shape=(2,))
like0 = pm.Mixture("like0", w=w0, comp_dists=comp0, observed=y)
comp1 = pm.Poisson.dist(mu=np.ones((20, 2)), shape=(20, 2))
w1 = pm.Dirichlet("w1", a=np.ones(2), shape=(2,))
like1 = pm.Mixture("like1", w=w1, comp_dists=comp1, observed=y)
comp2 = pm.Poisson.dist(mu=np.ones(2))
w2 = pm.Dirichlet("w2", a=np.ones(2), shape=(20, 2))
like2 = pm.Mixture("like2", w=w2, comp_dists=comp2, observed=y)
comp3 = pm.Poisson.dist(mu=np.ones(2), shape=(20, 2))
w3 = pm.Dirichlet("w3", a=np.ones(2), shape=(20, 2))
like3 = pm.Mixture("like3", w=w3, comp_dists=comp3, observed=y)
# XXX: This needs to be refactored
rand0, rand1, rand2, rand3 = [None] * 4 # draw_values(
# [like0, like1, like2, like3], point=m.initial_point, size=100
# )
assert rand0.shape == (100, 20)
assert rand1.shape == (100, 20)
assert rand2.shape == (100, 20)
assert rand3.shape == (100, 20)
class TestDensityDist:
@pytest.mark.parametrize("size", [(), (3,), (3, 2)], ids=str)
def test_density_dist_with_random(self, size):
with pm.Model() as model:
mu = pm.Normal("mu", 0, 1)
obs = pm.DensityDist(
"density_dist",
mu,
random=lambda mu, rng=None, size=None: rng.normal(loc=mu, scale=1, size=size),
observed=np.random.randn(100, *size),
size=size,
)
assert obs.eval().shape == (100,) + size
def test_density_dist_without_random(self):
with pm.Model() as model:
mu = pm.Normal("mu", 0, 1)
pm.DensityDist(
"density_dist",
mu,
logp=lambda value, mu: logpt(pm.Normal.dist(mu, 1, size=100), value),
observed=np.random.randn(100),
initval=0,
)
idata = pm.sample(tune=50, draws=100, cores=1, step=pm.Metropolis())
samples = 500
with pytest.raises(NotImplementedError):
pm.sample_posterior_predictive(idata, samples=samples, model=model, size=100)
@pytest.mark.parametrize("size", [(), (3,), (3, 2)], ids=str)
def test_density_dist_with_random_multivariate(self, size):
supp_shape = 5
with pm.Model() as model:
mu = pm.Normal("mu", 0, 1, size=supp_shape)
obs = pm.DensityDist(
"density_dist",
mu,
random=lambda mu, rng=None, size=None: rng.multivariate_normal(
mean=mu, cov=np.eye(len(mu)), size=size
),
observed=np.random.randn(100, *size, supp_shape),
size=size,
ndims_params=[1],
ndim_supp=1,
)
assert obs.eval().shape == (100,) + size + (supp_shape,)
class TestNestedRandom(SeededTest):
def build_model(self, distribution, shape, nested_rvs_info):
with pm.Model() as model:
nested_rvs = {}
for rv_name, info in nested_rvs_info.items():
try:
value, nested_shape = info
loc = 0.0
except ValueError:
value, nested_shape, loc = info
if value is None:
nested_rvs[rv_name] = pm.Uniform(
rv_name,
0 + loc,
1 + loc,
shape=nested_shape,
)
else:
nested_rvs[rv_name] = value * np.ones(nested_shape)
rv = distribution(
"target",
shape=shape,
**nested_rvs,
)
return model, rv, nested_rvs
def sample_prior(self, distribution, shape, nested_rvs_info, prior_samples):
model, rv, nested_rvs = self.build_model(
distribution,
shape,
nested_rvs_info,
)
with model:
return pm.sample_prior_predictive(prior_samples, return_inferencedata=False)
@pytest.mark.parametrize(
["prior_samples", "shape", "mu", "alpha"],
[
[10, (3,), (None, tuple()), (None, (3,))],
[10, (3,), (None, (3,)), (None, tuple())],
[
10,
(
4,
3,
),
(None, (3,)),
(None, (3,)),
],
[
10,
(
4,
3,
),
(None, (3,)),
(None, (4, 3)),
],
],
ids=str,
)
def test_NegativeBinomial(
self,
prior_samples,
shape,
mu,
alpha,
):
prior = self.sample_prior(
distribution=pm.NegativeBinomial,
shape=shape,
nested_rvs_info=dict(mu=mu, alpha=alpha),
prior_samples=prior_samples,
)
assert prior["target"].shape == (prior_samples,) + shape
@pytest.mark.parametrize(
["prior_samples", "shape", "psi", "mu", "alpha"],
[
[10, (3,), (0.5, tuple()), (None, tuple()), (None, (3,))],
[10, (3,), (0.5, (3,)), (None, tuple()), (None, (3,))],
[10, (3,), (0.5, tuple()), (None, (3,)), (None, tuple())],
[10, (3,), (0.5, (3,)), (None, (3,)), (None, tuple())],
[
10,
(
4,
3,
),
(0.5, (3,)),
(None, (3,)),
(None, (3,)),
],
[
10,
(
4,
3,
),
(0.5, (3,)),
(None, (3,)),
(None, (4, 3)),
],
],
ids=str,
)
def test_ZeroInflatedNegativeBinomial(
self,
prior_samples,
shape,
psi,
mu,
alpha,
):
prior = self.sample_prior(
distribution=pm.ZeroInflatedNegativeBinomial,
shape=shape,
nested_rvs_info=dict(psi=psi, mu=mu, alpha=alpha),
prior_samples=prior_samples,
)
assert prior["target"].shape == (prior_samples,) + shape
@pytest.mark.parametrize(
["prior_samples", "shape", "nu", "sigma"],
[
[10, (3,), (None, tuple()), (None, (3,))],
[10, (3,), (None, tuple()), (None, (3,))],
[10, (3,), (None, (3,)), (None, tuple())],
[10, (3,), (None, (3,)), (None, tuple())],
[
10,
(
4,
3,
),
(None, (3,)),
(None, (3,)),
],
[
10,
(
4,
3,
),
(None, (3,)),
(None, (4, 3)),
],
],
ids=str,
)
def test_Rice(
self,
prior_samples,
shape,
nu,
sigma,
):
prior = self.sample_prior(
distribution=pm.Rice,
shape=shape,
nested_rvs_info=dict(nu=nu, sigma=sigma),
prior_samples=prior_samples,
)
assert prior["target"].shape == (prior_samples,) + shape
@pytest.mark.parametrize(
["prior_samples", "shape", "mu", "sigma", "lower", "upper"],
[
[10, (3,), (None, tuple()), (1.0, tuple()), (None, tuple(), -1), (None, (3,))],
[10, (3,), (None, tuple()), (1.0, tuple()), (None, tuple(), -1), (None, (3,))],
[10, (3,), (None, tuple()), (1.0, tuple()), (None, (3,), -1), (None, tuple())],
[10, (3,), (None, tuple()), (1.0, tuple()), (None, (3,), -1), (None, tuple())],
[
10,
(
4,
3,
),
(None, (3,)),
(1.0, tuple()),
(None, (3,), -1),
(None, (3,)),
],
[
10,
(
4,
3,
),
(None, (3,)),
(1.0, tuple()),
(None, (3,), -1),
(None, (4, 3)),
],
[10, (3,), (0.0, tuple()), (None, tuple()), (None, tuple(), -1), (None, (3,))],
[10, (3,), (0.0, tuple()), (None, tuple()), (None, tuple(), -1), (None, (3,))],
[10, (3,), (0.0, tuple()), (None, tuple()), (None, (3,), -1), (None, tuple())],
[10, (3,), (0.0, tuple()), (None, tuple()), (None, (3,), -1), (None, tuple())],
[
10,
(
4,
3,
),
(0.0, tuple()),
(None, (3,)),
(None, (3,), -1),
(None, (3,)),
],
[
10,
(
4,
3,
),
(0.0, tuple()),
(None, (3,)),
(None, (3,), -1),
(None, (4, 3)),
],
],
ids=str,
)
def test_TruncatedNormal(
self,
prior_samples,
shape,
mu,
sigma,
lower,
upper,
):
prior = self.sample_prior(
distribution=pm.TruncatedNormal,
shape=shape,
nested_rvs_info=dict(mu=mu, sigma=sigma, lower=lower, upper=upper),
prior_samples=prior_samples,
)
assert prior["target"].shape == (prior_samples,) + shape
@pytest.mark.parametrize(
["prior_samples", "shape", "c", "lower", "upper"],
[
[10, (3,), (None, tuple()), (-1.0, (3,)), (2, tuple())],
[10, (3,), (None, tuple()), (-1.0, tuple()), (None, tuple(), 1)],
[10, (3,), (None, (3,)), (-1.0, tuple()), (None, tuple(), 1)],
[
10,
(
4,
3,
),
(None, (3,)),
(-1.0, tuple()),
(None, (3,), 1),
],
[
10,
(
4,
3,
),
(None, (3,)),
(None, tuple(), -1),
(None, (3,), 1),
],
],
ids=str,
)
def test_Triangular(
self,
prior_samples,
shape,
c,
lower,
upper,
):
prior = self.sample_prior(
distribution=pm.Triangular,
shape=shape,
nested_rvs_info=dict(c=c, lower=lower, upper=upper),
prior_samples=prior_samples,
)
assert prior["target"].shape == (prior_samples,) + shape
def generate_shapes(include_params=False):
# fmt: off
mudim_as_event = [
[None, 1, 3, 10, (10, 3), 100],
[(3,)],
[(1,), (3,)],
["cov", "chol", "tau"]
]
# fmt: on
mudim_as_dist = [
[None, 1, 3, 10, (10, 3), 100],
[(10, 3)],
[(1,), (3,), (1, 1), (1, 3), (10, 1), (10, 3)],
["cov", "chol", "tau"],
]
if not include_params:
del mudim_as_event[-1]
del mudim_as_dist[-1]
data = itertools.chain(itertools.product(*mudim_as_event), itertools.product(*mudim_as_dist))
return data
@pytest.mark.skip(reason="This test is covered by Aesara")
class TestMvNormal(SeededTest):
@pytest.mark.parametrize(
["sample_shape", "dist_shape", "mu_shape", "param"],
generate_shapes(include_params=True),
ids=str,
)
def test_with_np_arrays(self, sample_shape, dist_shape, mu_shape, param):
dist = pm.MvNormal.dist(mu=np.ones(mu_shape), **{param: np.eye(3)}, shape=dist_shape)
output_shape = to_tuple(sample_shape) + dist_shape
assert dist.random(size=sample_shape).shape == output_shape
@pytest.mark.parametrize(
["sample_shape", "dist_shape", "mu_shape"],
generate_shapes(include_params=False),
ids=str,
)
def test_with_chol_rv(self, sample_shape, dist_shape, mu_shape):
with pm.Model() as model:
mu = pm.Normal("mu", 0.0, 1.0, shape=mu_shape)
sd_dist = pm.Exponential.dist(1.0, shape=3)
chol, corr, stds = pm.LKJCholeskyCov(
"chol_cov", n=3, eta=2, sd_dist=sd_dist, compute_corr=True
)
mv = pm.MvNormal("mv", mu, chol=chol, shape=dist_shape)
prior = pm.sample_prior_predictive(samples=sample_shape)
assert prior["mv"].shape == to_tuple(sample_shape) + dist_shape
@pytest.mark.parametrize(
["sample_shape", "dist_shape", "mu_shape"],
generate_shapes(include_params=False),
ids=str,
)
def test_with_cov_rv(self, sample_shape, dist_shape, mu_shape):
with pm.Model() as model:
mu = pm.Normal("mu", 0.0, 1.0, shape=mu_shape)
sd_dist = pm.Exponential.dist(1.0, shape=3)
chol, corr, stds = pm.LKJCholeskyCov(
"chol_cov", n=3, eta=2, sd_dist=sd_dist, compute_corr=True
)
mv = pm.MvNormal("mv", mu, cov=pm.math.dot(chol, chol.T), shape=dist_shape)
prior = pm.sample_prior_predictive(samples=sample_shape)
assert prior["mv"].shape == to_tuple(sample_shape) + dist_shape
def test_issue_3758(self):
np.random.seed(42)
ndim = 50
with pm.Model() as model:
a = pm.Normal("a", sigma=100, shape=ndim)
b = pm.Normal("b", mu=a, sigma=1, shape=ndim)
c = pm.MvNormal("c", mu=a, chol=np.linalg.cholesky(np.eye(ndim)), shape=ndim)
d = pm.MvNormal("d", mu=a, cov=np.eye(ndim), shape=ndim)
samples = pm.sample_prior_predictive(1000)
for var in "abcd":
assert not np.isnan(np.std(samples[var]))
for var in "bcd":
std = np.std(samples[var] - samples["a"])
npt.assert_allclose(std, 1, rtol=1e-2)
def test_issue_3829(self):
with pm.Model() as model:
x = pm.MvNormal("x", mu=np.zeros(5), cov=np.eye(5), shape=(2, 5))
trace_pp = pm.sample_prior_predictive(50)
assert np.shape(trace_pp["x"][0]) == (2, 5)
def test_issue_3706(self):
N = 10
Sigma = np.eye(2)
with pm.Model() as model:
X = pm.MvNormal("X", mu=np.zeros(2), cov=Sigma, shape=(N, 2))
betas = pm.Normal("betas", 0, 1, shape=2)
y = pm.Deterministic("y", pm.math.dot(X, betas))
prior_pred = pm.sample_prior_predictive(1)
assert prior_pred["X"].shape == (1, N, 2)
@pytest.mark.xfail(reason="This distribution has not been refactored for v4")
def test_matrix_normal_random_with_random_variables():
"""
This test checks for shape correctness when using MatrixNormal distribution
with parameters as random variables.
Originally reported - https://github.com/pymc-devs/pymc/issues/3585
"""
K = 3
D = 15
mu_0 = np.zeros((D, K))
lambd = 1.0
with pm.Model() as model:
sd_dist = pm.HalfCauchy.dist(beta=2.5)
packedL = pm.LKJCholeskyCov("packedL", eta=2, n=D, sd_dist=sd_dist)
L = pm.expand_packed_triangular(D, packedL, lower=True)
Sigma = pm.Deterministic("Sigma", L.dot(L.T)) # D x D covariance
mu = pm.MatrixNormal(
"mu", mu=mu_0, rowcov=(1 / lambd) * Sigma, colcov=np.eye(K), shape=(D, K)
)
prior = pm.sample_prior_predictive(2)
assert prior["mu"].shape == (2, D, K)
@pytest.mark.xfail(reason="This distribution has not been refactored for v4")
class TestMvGaussianRandomWalk(SeededTest):
@pytest.mark.parametrize(
["sample_shape", "dist_shape", "mu_shape", "param"],
generate_shapes(include_params=True),
ids=str,
)
def test_with_np_arrays(self, sample_shape, dist_shape, mu_shape, param):
dist = pm.MvGaussianRandomWalk.dist(
mu=np.ones(mu_shape), **{param: np.eye(3)}, shape=dist_shape
)
output_shape = to_tuple(sample_shape) + dist_shape
assert dist.random(size=sample_shape).shape == output_shape
@pytest.mark.xfail
@pytest.mark.parametrize(
["sample_shape", "dist_shape", "mu_shape"],
generate_shapes(include_params=False),
ids=str,
)
def test_with_chol_rv(self, sample_shape, dist_shape, mu_shape):
with pm.Model() as model:
mu = pm.Normal("mu", 0.0, 1.0, shape=mu_shape)
sd_dist = pm.Exponential.dist(1.0, shape=3)
chol, corr, stds = pm.LKJCholeskyCov(
"chol_cov", n=3, eta=2, sd_dist=sd_dist, compute_corr=True
)
mv = pm.MvGaussianRandomWalk("mv", mu, chol=chol, shape=dist_shape)
prior = pm.sample_prior_predictive(samples=sample_shape)
assert prior["mv"].shape == to_tuple(sample_shape) + dist_shape
@pytest.mark.xfail
@pytest.mark.parametrize(
["sample_shape", "dist_shape", "mu_shape"],
generate_shapes(include_params=False),
ids=str,
)
def test_with_cov_rv(self, sample_shape, dist_shape, mu_shape):
with pm.Model() as model:
mu = pm.Normal("mu", 0.0, 1.0, shape=mu_shape)
sd_dist = pm.Exponential.dist(1.0, shape=3)
chol, corr, stds = pm.LKJCholeskyCov(
"chol_cov", n=3, eta=2, sd_dist=sd_dist, compute_corr=True
)
mv = pm.MvGaussianRandomWalk("mv", mu, cov=pm.math.dot(chol, chol.T), shape=dist_shape)
prior = pm.sample_prior_predictive(samples=sample_shape)
assert prior["mv"].shape == to_tuple(sample_shape) + dist_shape
@pytest.mark.parametrize("sparse", [True, False])
def test_car_rng_fn(sparse):
delta = 0.05 # limit for KS p-value
    n_fails = 20  # Allow the KS test to fail a certain number of times
size = (100,)
W = np.array(
[[0.0, 1.0, 1.0, 0.0], [1.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 1.0, 0.0]]
)
tau = 2
alpha = 0.5
mu = np.array([1, 1, 1, 1])
D = W.sum(axis=0)
prec = tau * (np.diag(D) - alpha * W)
cov = np.linalg.inv(prec)
W = aesara.tensor.as_tensor_variable(W)
if sparse:
W = aesara.sparse.csr_from_dense(W)
with pm.Model(rng_seeder=1):
car = pm.CAR("car", mu, W, alpha, tau, size=size)
mn = pm.MvNormal("mn", mu, cov, size=size)
check = pm.sample_prior_predictive(n_fails, return_inferencedata=False)
p, f = delta, n_fails
while p <= delta and f > 0:
car_smp, mn_smp = check["car"][f - 1, :, :], check["mn"][f - 1, :, :]
p = min(
st.ks_2samp(
np.atleast_1d(car_smp[..., idx]).flatten(),
np.atleast_1d(mn_smp[..., idx]).flatten(),
)[1]
for idx in range(car_smp.shape[-1])
)
f -= 1
assert p > delta
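# --- Illustrative sketch (not part of the original test module) ---
# The checks above repeatedly compare PyMC draws against reference draws with a
# two-sample Kolmogorov-Smirnov test. The helper below shows that pattern in
# isolation; the name _ks_matches and the default alpha are assumptions made
# only for this example.
def _ks_matches(draws_a, draws_b, alpha=0.05):
    """Return True when the KS test does not reject that both samples match."""
    p_value = st.ks_2samp(np.ravel(draws_a), np.ravel(draws_b))[1]
    return p_value > alpha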
| 35.211755
| 112
| 0.593776
|
d718e8d23069959591cf8b758d5c45381a08e628
| 873
|
py
|
Python
|
pyACA/FeatureSpectralSpread.py
|
RichardYang40148/pyACA-1
|
870d100ed232cca5a890570426116f70cd0736c8
|
[
"MIT"
] | null | null | null |
pyACA/FeatureSpectralSpread.py
|
RichardYang40148/pyACA-1
|
870d100ed232cca5a890570426116f70cd0736c8
|
[
"MIT"
] | null | null | null |
pyACA/FeatureSpectralSpread.py
|
RichardYang40148/pyACA-1
|
870d100ed232cca5a890570426116f70cd0736c8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
computes the spectral spread from the magnitude spectrum
Args:
X: spectrogram (dimension FFTLength X Observations)
f_s: sample rate of audio data
Returns:
vss spectral spread (in Hz)
"""
import numpy as np
from pyACA.FeatureSpectralCentroid import FeatureSpectralCentroid
def FeatureSpectralSpread(X, f_s):
    # get spectral centroid as index
    vsc = FeatureSpectralCentroid(X, f_s) * 2 / f_s * (X.shape[0] - 1)
    # X = X**2 removed for consistency with book
    norm = X.sum(axis=0)
    norm[norm == 0] = 1
    # compute spread
    vss = np.zeros(X.shape[1])
    indices = np.arange(0, X.shape[0])
    for n in range(0, X.shape[1]):
        vss[n] = np.dot((indices - vsc[0, n]) ** 2, X[:, n]) / norm[n]
    vss = np.sqrt(vss)
    # convert from index to Hz
    vss = vss / (X.shape[0] - 1) * f_s / 2
    return vss
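# --- Illustrative usage (sketch, not part of the original module) ---
# Assumes a magnitude spectrogram X of shape (FFTLength, Observations), e.g.
# np.abs() of an STFT, and that pyACA (with FeatureSpectralCentroid) is importable.
if __name__ == "__main__":
    f_s = 44100
    X_demo = np.abs(np.random.randn(1025, 10))   # dummy spectrogram, demo only
    print(FeatureSpectralSpread(X_demo, f_s))    # one spread value (Hz) per column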
| 22.973684
| 65
| 0.618557
|
93dc622697e60e16d08254e8228d4ad66792e772
| 4,134
|
py
|
Python
|
unittest_reinvent/inception_tests/test_empty_inception.py
|
fujirock/Reinvent
|
9c57636f9d32b4ce5b75670f43906a70d5daf886
|
[
"MIT"
] | 4
|
2021-05-11T05:34:01.000Z
|
2022-03-30T10:04:21.000Z
|
unittest_reinvent/inception_tests/test_empty_inception.py
|
prasannavd/Reinvent
|
ca02ebee8d8ed83223c55f4a1dd1b3fbc2359616
|
[
"MIT"
] | null | null | null |
unittest_reinvent/inception_tests/test_empty_inception.py
|
prasannavd/Reinvent
|
ca02ebee8d8ed83223c55f4a1dd1b3fbc2359616
|
[
"MIT"
] | 2
|
2021-06-01T11:56:10.000Z
|
2021-10-05T04:33:56.000Z
|
import os
import shutil
import unittest
import numpy as np
import numpy.testing as nt
import torch as ts
import utils.general as utils_general
from models.model import Model
from running_modes.configurations.reinforcement_learning.inception_configuration import InceptionConfiguration
from running_modes.reinforcement_learning.inception import Inception
from scoring.component_parameters import ComponentParameters
from scoring.function import CustomSum
from unittest_reinvent.fixtures.paths import RANDOM_PRIOR_PATH, MAIN_TEST_PATH
from utils.enums.scoring_function_component_enum import ScoringFunctionComponentNameEnum
class Test_empty_inception(unittest.TestCase):
def setUp(self):
sf_enum = ScoringFunctionComponentNameEnum()
utils_general.set_default_device_cuda()
self.log_path = MAIN_TEST_PATH
if not os.path.isdir(self.log_path):
os.makedirs(self.log_path)
smiles = []
score = []
prior_likelihood = ts.tensor([])
prior = Model.load_from_file(RANDOM_PRIOR_PATH)
config = InceptionConfiguration(smiles=smiles, memory_size=4, sample_size=4)
scoring = ComponentParameters(component_type=sf_enum.JACCARD_DISTANCE,
name="jaccard_distance",
weight=1.,
smiles=["CONN", "O=S(=O)(c3ccc(n1nc(cc1c2ccc(cc2)C)C(F)(F)F)cc3)N"],
model_path="",
specific_parameters={})
scoringfunction = CustomSum(parameters=[scoring])
self.inception_model = Inception(configuration=config, scoring_function=scoringfunction, prior=prior)
self.inception_model.add(smiles, score, prior_likelihood)
def tearDown(self):
if os.path.isdir(self.log_path):
shutil.rmtree(self.log_path)
def test_empty_add(self):
smiles = np.array(['CCC', 'CCCC', 'CC'])
score = [0, 0.5, 1.]
prior_likelihood = ts.tensor([0, 10, 100])
self.inception_model.add(smiles, score, prior_likelihood)
self.assertEqual(len(self.inception_model.memory), 3)
def test_empty_add_2(self):
smiles = np.array(['CCC', 'CCCC', 'CCCCC', 'COCNNC'])
score = [0, 0.5, 0.5, 1]
prior_likelihood = ts.tensor([0, 10, 10, 100])
self.inception_model.add(smiles, score, prior_likelihood)
self.assertEqual(len(self.inception_model.sample()[0]), self.inception_model.configuration.memory_size)
self.assertEqual(len(self.inception_model.sample()[1]), self.inception_model.configuration.sample_size)
self.assertEqual(len(self.inception_model.sample()[2]), self.inception_model.configuration.memory_size)
self.assertEqual(len(self.inception_model.sample()[2]), self.inception_model.configuration.sample_size)
nt.assert_almost_equal(np.array([1, 0.5, 0.5, 0]), np.array(self.inception_model.memory['score'].values))
nt.assert_almost_equal(np.array([100, 10, 10, 0]), np.array(self.inception_model.memory['likelihood'].values))
def test_empty_eval_add_1(self):
sf_enum = ScoringFunctionComponentNameEnum()
smiles = np.array(['CCC', 'CCCC', 'CC', 'COO'])
scoring = ComponentParameters(component_type=sf_enum.TANIMOTO_SIMILARITY,
name="tanimoto_similarity",
weight=1.,
smiles=["CCC", "CC"],
model_path="",
specific_parameters={})
scoringfunction = CustomSum(parameters=[scoring])
prior = Model.load_from_file(RANDOM_PRIOR_PATH)
self.inception_model.evaluate_and_add(smiles, scoringfunction, prior)
self.assertEqual(len(self.inception_model.memory), 4)
nt.assert_almost_equal(np.array(self.inception_model.memory['score'].values), np.array([1, 1, 0.6667, 0.1250]),
4)
self.assertEqual(len(np.array(self.inception_model.memory['likelihood'].values)), 4)
| 49.807229
| 119
| 0.652879
|
ab037f25a1721157c0d866217fdf19be324b67bd
| 22,155
|
py
|
Python
|
swaps/client/market.py
|
DunnCreativeSS/cash_carry_leveraged_futures_arbitrageur
|
1120ebfb487ce4987fe70e6645b36e0d7ce041ec
|
[
"Apache-2.0"
] | 1
|
2021-09-06T00:09:11.000Z
|
2021-09-06T00:09:11.000Z
|
swaps/client/market.py
|
DunnCreativeSS/cash_carry_leveraged_futures_arbitrageur
|
1120ebfb487ce4987fe70e6645b36e0d7ce041ec
|
[
"Apache-2.0"
] | null | null | null |
swaps/client/market.py
|
DunnCreativeSS/cash_carry_leveraged_futures_arbitrageur
|
1120ebfb487ce4987fe70e6645b36e0d7ce041ec
|
[
"Apache-2.0"
] | null | null | null |
from swaps.constant import *
from swaps.model.market import *
from swaps.utils import *
from swaps.utils.input_checker import check_in_list
class MarketClient(object):
def __init__(self, **kwargs):
"""
Create the request client instance.
:param kwargs: The option of request connection.
api_key: The public key applied from Huobi.
secret_key: The private key applied from Huobi.
url: The URL name like "https://api.huobi.pro".
init_log: to init logger
"""
self.__kwargs = kwargs
def get_candlestick(self, symbol, period, size=200):
"""
        Get the candlestick/kline for the specified symbol. The data number defaults to 200.
:param symbol: The symbol, like "btcusdt". To query hb10, put "hb10" at here. (mandatory)
:param period: The candlestick/kline interval, MIN1, MIN5, DAY1 etc. (mandatory)
        :param size: The number of candlestick/kline data points requested, range [1 - 2000]. (optional)
:return: The list of candlestick/kline data.
"""
check_symbol(symbol)
check_should_not_none(period, "period")
check_range(size, 1, 2000, "size")
params = {
"symbol": symbol,
"period": period,
"size": size
}
from swaps.service.market.get_candlestick import GetCandleStickService
return GetCandleStickService(params).request(**self.__kwargs)
def sub_candlestick(self, symbols: 'str', interval: 'CandlestickInterval', callback, error_handler):
"""
Subscribe candlestick/kline event. If the candlestick/kline is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param interval: The candlestick/kline interval, MIN1, MIN5, DAY1 etc.
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(candlestick_event: 'CandlestickEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(interval, "interval")
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
"interval": interval,
}
from swaps.service.market.sub_candlestick import SubCandleStickService
SubCandleStickService(params).subscribe(callback, error_handler, **self.__kwargs)
def req_candlestick(self, symbols: 'str', interval: 'CandlestickInterval', callback,
from_ts_second=None, end_ts_second=None, error_handler=None):
"""
Subscribe candlestick/kline event. If the candlestick/kline is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param interval: The candlestick/kline interval, MIN1, MIN5, DAY1 etc.
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(candlestick_event: 'CandlestickEvent'):
pass
:param from_ts_second : data from timestamp [it's second]
:param end_ts_second : data util timestamp [it's second]
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(interval, "interval")
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
"interval": interval,
"from_ts_second": from_ts_second,
"end_ts_second": end_ts_second
}
from swaps.service.market.req_candlestick import ReqCandleStickService
ReqCandleStickService(params).subscribe(callback, error_handler, **self.__kwargs)
def get_pricedepth(self, symbol: 'str', depth_type: 'str', depth_size: 'int' = None) -> PriceDepth:
"""
Get the Market Depth of a symbol.
:param symbol: The symbol, like "btcusdt". (mandatory)
        :param depth_type: The type, like "step0" to "step5". (mandatory)
:param depth_size(optional): The maximum number of Market Depth step0 requested. range [1 - 150], default is 150
The maximum number of Market Depth step1,step2,step3,step4,step5 requested. size is in [5, 10, 20], default is 20.
:return: Market Depth data.
"""
check_symbol(symbol)
check_in_list(depth_type, [DepthStep.STEP0, DepthStep.STEP1, DepthStep.STEP2, DepthStep.STEP3, DepthStep.STEP4,
DepthStep.STEP5], "depth_type")
params = {
"symbol": symbol,
"type": depth_type,
# "depth": depth_size
}
from swaps.service.market.get_pricedepth import GetPriceDepthService
ret_data = GetPriceDepthService(params).request(**self.__kwargs)
if depth_size is not None:
if (ret_data.bids is not None) and (len(ret_data.bids) > depth_size):
ret_data.bids = ret_data.bids[0:depth_size]
if (ret_data.asks is not None) and (len(ret_data.asks) > depth_size):
ret_data.asks = ret_data.asks[0:depth_size]
return ret_data
@staticmethod
def get_depth_step_list():
return [DepthStep.STEP0,
DepthStep.STEP1,
DepthStep.STEP2,
DepthStep.STEP3,
DepthStep.STEP4,
DepthStep.STEP5]
@staticmethod
    def get_valid_depth_step(value, default_value):
step_list = MarketClient.get_depth_step_list()
if value in step_list:
return value
else:
            return default_value
def sub_pricedepth(self, symbols: 'str', depth_step: 'str', callback, error_handler=None):
"""
Subscribe price depth event. If the price depth is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param depth_step: The depth precision, string from step0 to step5.
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(price_depth_event: 'PriceDepthEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
        new_step = MarketClient.get_valid_depth_step(value=depth_step, default_value=DepthStep.STEP0)
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
"step": new_step,
}
from swaps.service.market.sub_pricedepth import SubPriceDepthService
SubPriceDepthService(params).subscribe(callback, error_handler, **self.__kwargs)
def sub_pricedepth_bbo(self, symbols: 'str', callback, error_handler=None):
"""
Subscribe price depth event. If the price depth is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(price_depth_event: 'PriceDepthEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
}
from swaps.service.market.sub_pricedepth_bbo import SubPriceDepthBboService
SubPriceDepthBboService(params).subscribe(callback, error_handler, **self.__kwargs)
def req_pricedepth(self, symbols: 'str', depth_step: 'str', callback, error_handler=None):
"""
Subscribe price depth event. If the price depth is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param depth_step: The depth precision, string from step0 to step5.
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(price_depth_event: 'PriceDepthEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
        new_step = MarketClient.get_valid_depth_step(value=depth_step, default_value=DepthStep.STEP0)
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
"step": new_step,
}
from swaps.service.market.req_pricedepth import ReqPriceDepthService
ReqPriceDepthService(params).subscribe(callback, error_handler, **self.__kwargs)
def get_market_detail(self, symbol: 'str') -> MarketDetail:
"""
Get trade statistics in 24 hours.
:param symbol: The symbol, like "btcusdt". (mandatory)
:return: Trade statistics.
"""
check_symbol(symbol)
params = {
"symbol": symbol,
}
from swaps.service.market.get_market_detail import GetMarketDetailService
return GetMarketDetailService(params).request(**self.__kwargs)
def sub_market_detail(self, symbols: 'str', callback, error_handler=None):
"""
Subscribe 24 hours trade statistics event. If statistics is generated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(trade_statistics_event: 'TradeStatisticsEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
}
from swaps.service.market.sub_market_detail import SubMarketDetailService
SubMarketDetailService(params).subscribe(callback, error_handler, **self.__kwargs)
def req_market_detail(self, symbols: 'str', callback, error_handler=None):
"""
Subscribe 24 hours trade statistics event. If statistics is generated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(trade_statistics_event: 'TradeStatisticsEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
}
from swaps.service.market.req_market_detail import ReqMarketDetailService
ReqMarketDetailService(params).subscribe(callback, error_handler, **self.__kwargs)
def get_market_trade(self, symbol: 'str') -> list:
"""
Get the most recent trades with their price, volume and direction.
:param symbol: The symbol, like "btcusdt". (mandatory)
:return: The list of trade.
"""
check_symbol(symbol)
params = {
"symbol": symbol,
}
from swaps.service.market.get_market_trade import GetMarketTradeService
return GetMarketTradeService(params).request(**self.__kwargs)
def get_history_trade(self, symbol: 'str', size: 'int' = None) -> list:
"""
Get the most recent trades with their price, volume and direction.
:param symbol: The symbol, like "btcusdt". (mandatory)
:param size: The number of historical trade requested, range [1 - 2000] (optional)
:return: The list of trade.
"""
check_symbol(symbol)
check_range(size, 1, 2000, "size")
params = {
"symbol": symbol,
"size": size
}
from swaps.service.market.get_history_trade import GetHistoryTradeService
return GetHistoryTradeService(params).request(**self.__kwargs)
def sub_trade_detail(self, symbols: 'str', callback, error_handler=None):
"""
Subscribe price depth event. If the price depth is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(trade_event: 'TradeEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
}
from swaps.service.market.sub_trade_detail import SubTradeDetailService
SubTradeDetailService(params).subscribe(callback, error_handler, **self.__kwargs)
def req_trade_detail(self, symbols: 'str', callback, error_handler=None):
"""
Subscribe price depth event. If the price depth is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(trade_event: 'TradeEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
}
from swaps.service.market.req_trade_detail import ReqTradeDetailService
ReqTradeDetailService(params).subscribe(callback, error_handler, **self.__kwargs)
def get_market_detail_merged(self, symbol):
check_symbol(symbol)
params = {
"symbol": symbol
}
from swaps.service.market.get_market_detail_merged import GetMarketDetailMergedService
return GetMarketDetailMergedService(params).request(**self.__kwargs)
def get_market_tickers(self) -> list:
"""
get market tickers
:return: market ticker list.
"""
params = {}
from swaps.service.market.get_market_tickers import GetMarketTickersService
return GetMarketTickersService(params).request(**self.__kwargs)
"""
    incremental mbp (market by price)
"""
def sub_mbp_increase(self, symbols: 'str', levels: 'int', callback, error_handler=None):
"""
Subscribe mbp event. If the mbp is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
        :param levels: depth level, one of 5, 10, 20 or 150; currently only 150 is supported
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(price_depth_event: 'PriceDepthEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
check_should_not_none(symbols, "symbol")
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(levels, "levels")
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
"levels": levels
}
from swaps.service.market.sub_mbp_increase import SubMbpIncreaseService
SubMbpIncreaseService(params).subscribe(callback, error_handler, **self.__kwargs)
"""
subscribe full mbp(market by price)
"""
def sub_mbp_full(self, symbols: 'str', levels: 'int', callback, error_handler=None):
"""
Subscribe full mbp event. If the mbp is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
        :param levels: depth level, one of 5, 10 or 20
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(price_depth_event: 'PriceDepthEvent'):
pass
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
check_should_not_none(symbols, "symbol")
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(levels, "levels")
check_in_list(levels, [MbpLevel.MBP5, MbpLevel.MBP10, MbpLevel.MBP20], "levels")
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
"levels": levels
}
from swaps.service.market.sub_mbp_full import SubMbpFullService
SubMbpFullService(params).subscribe(callback, error_handler, **self.__kwargs)
def req_mbp(self, symbols: 'str', levels: 'int', callback, auto_close=True, error_handler=None):
"""
Subscribe mbp event. If the mbp is updated, server will send the data to client and onReceive in callback will be called.
:param symbols: The symbols, like "btcusdt". Use comma to separate multi symbols, like "btcusdt,ethusdt".
        :param levels: depth level, one of 5, 10, 20 or 150; currently only 150 is supported
:param callback: The implementation is required. onReceive will be called if receive server's update.
example: def callback(price_depth_event: 'PriceDepthEvent'):
pass
:param auto_close : close websocket connection after get data
:param error_handler: The error handler will be called if subscription failed or error happen between client and Huobi server
example: def error_handler(exception: 'HuobiApiException')
pass
:return: No return
"""
check_should_not_none(symbols, "symbol")
symbol_list = symbols.split(",")
check_symbol_list(symbol_list)
check_should_not_none(levels, "levels")
check_should_not_none(callback, "callback")
params = {
"symbol_list": symbol_list,
"levels": levels
}
from swaps.service.market.req_mbp import ReqMbpService
ReqMbpService(params).subscribe(callback, error_handler, **self.__kwargs)
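# --- Illustrative usage (sketch, not part of the original module) ---
# The get_* helpers are one-shot REST-style queries and the sub_* helpers push
# updates to a callback. The credentials implied by MarketClient(), the symbol
# and CandlestickInterval.MIN1 are assumptions made only for this example.
if __name__ == "__main__":
    market_client = MarketClient()
    # One-shot queries:
    candlesticks = market_client.get_candlestick("btcusdt", CandlestickInterval.MIN1, size=10)
    depth = market_client.get_pricedepth("btcusdt", DepthStep.STEP0, depth_size=5)
    # Streaming subscription with a callback and an error handler:
    def _on_candlestick(candlestick_event):
        print(candlestick_event)
    def _on_error(exception):
        print(exception)
    market_client.sub_candlestick("btcusdt", CandlestickInterval.MIN1, _on_candlestick, _on_error)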
| 44.577465
| 157
| 0.653893
|
8d53e6b062a3ef25bc51f74d907d7000efe038c5
| 765
|
py
|
Python
|
src/pyrtime/rtime/datastructures.py
|
amitu/rtime
|
6e23a2f86a2cae4399932879f0d157f8e63a8358
|
[
"BSD-3-Clause"
] | 2
|
2017-04-01T14:05:58.000Z
|
2017-04-02T07:57:56.000Z
|
src/pyrtime/rtime/datastructures.py
|
amitu/rtime
|
6e23a2f86a2cae4399932879f0d157f8e63a8358
|
[
"BSD-3-Clause"
] | 3
|
2016-11-08T08:00:08.000Z
|
2016-11-08T08:15:20.000Z
|
src/pyrtime/rtime/datastructures.py
|
amitu/rtime
|
6e23a2f86a2cae4399932879f0d157f8e63a8358
|
[
"BSD-3-Clause"
] | null | null | null |
class Frame(dict):
def __init__(self, name=None, *args, **kwargs):
super(Frame, self).__init__(*args, **kwargs)
if name:
self.name = name
def get_current_frame(self):
if not self.stack:
self.stack.insert(-1, Frame())
return self.stack[-1]
def get_name(self):
return self.get('name')
def set_name(self, name):
if not name:
raise ValueError('name cannot be set empty')
self['name'] = name
name = property(get_name, set_name)
@property
def stack(self):
return self.setdefault('stack', [])
def push_frame(self, name):
self.stack.append(Frame(name=name))
def add_frame_data(self, **kwargs):
self.update(kwargs)
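# --- Illustrative usage (sketch, not part of the original module) ---
# Frame behaves like a dict with a named stack of nested frames; the frame
# names and the duration_ms key below are placeholders for this example.
if __name__ == "__main__":
    root = Frame(name="request")
    root.push_frame("db_query")                          # nest a child frame
    root.get_current_frame().add_frame_data(duration_ms=12)
    print(root.name, root.stack)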
| 24.677419
| 56
| 0.580392
|
983d7c5b9f43b607603a25d22267e4ebe302b830
| 5,931
|
py
|
Python
|
tabla/tabla/simulation/pu.py
|
ziqingzeng/public
|
4102b3bd42f43b49cf74599492d52d4f755ab7b2
|
[
"BSD-3-Clause"
] | 6
|
2021-04-20T06:33:25.000Z
|
2022-02-24T06:46:13.000Z
|
tabla/tabla/simulation/pu.py
|
ziqingzeng/public
|
4102b3bd42f43b49cf74599492d52d4f755ab7b2
|
[
"BSD-3-Clause"
] | 3
|
2021-04-20T04:28:51.000Z
|
2021-05-24T05:14:31.000Z
|
tabla/tabla/simulation/pu.py
|
ziqingzeng/public
|
4102b3bd42f43b49cf74599492d52d4f755ab7b2
|
[
"BSD-3-Clause"
] | 4
|
2021-04-08T16:38:46.000Z
|
2021-04-30T05:51:30.000Z
|
from .pe import PE
from .bus import PENB, PEGB
from .buffer import Buffer
from .bus_arbiter import PEGBArbiter
from .defaults import DEFAULT_NAMESPACE_BUFFER_SIZE, DEFAULT_BUS_BUFFER_SIZE, DEFAULT_INPUT_BITWIDTH, DEFAULT_INTERIM_BITWIDTH, DEFAULT_BUS_BITWIDTH
"""
PU has PEs, buses, bus arbiter.
"""
class PU(object):
def __init__(self, id,
pes_per_pu,
pe_buffer_size=DEFAULT_NAMESPACE_BUFFER_SIZE,
buffer_interim_size=DEFAULT_NAMESPACE_BUFFER_SIZE,
input_bitwidth=DEFAULT_INPUT_BITWIDTH,
interim_bitwidth=DEFAULT_INTERIM_BITWIDTH,
bus_bitwidth=DEFAULT_BUS_BITWIDTH,
bus_buffer_size=DEFAULT_BUS_BUFFER_SIZE,
debug=False):
self.id = id
self.pes_per_pu = pes_per_pu
# Size of namespace buffers for the PEs that belong to this PU
self.pe_buffer_size = pe_buffer_size
        # Size of NI (namespace interim) buffer for the PEs that belong to this PU
self.buffer_interim_size = buffer_interim_size
self.input_bitwidth = input_bitwidth
self.interim_bitwidth = interim_bitwidth
# Create PEs for this PU
self.pes = []
for i in range(pes_per_pu):
relative_id = i
absolute_id = pes_per_pu * id + relative_id
pe = PE(absolute_id, relative_id, self.pe_buffer_size,
self.buffer_interim_size,
self.input_bitwidth,
self.interim_bitwidth,
debug=debug)
self.pes.append(pe)
# Set Head PE of this PU
self.head_pe = self.pes[0]
self.head_pe.is_head_pe = True
# Set PENB's for each pair of PEs in this PU
for i, pe in enumerate(self.pes[:-1]):
source_pe = pe
dest_pe = self.pes[i + 1]
penb = PENB(source_pe, dest_pe, debug=debug)
# print(penb)
source_pe.set_penb(penb)
# Set last PE's neighbor to be first PE
last_pe = self.pes[-1]
first_pe = self.pes[0]
penb = PENB(last_pe, first_pe, debug=debug)
# print(penb)
last_pe.set_penb(penb)
self.bus_bitwidth = bus_bitwidth
self.bus_buffer_size = bus_buffer_size
# PE Global Bus for the PEs that belong to this PU
self.pegb = PEGB(self.pes, self.bus_bitwidth, debug=debug)
# PE Global Bus Arbiter
self.bus_arbiter = PEGBArbiter(self.pegb, debug=debug)
self.cycle = 0
self.debug = debug
def __str__(self):
pe_str = ''
for pe in self.pes:
pe_str += 'PE ' + str(pe.absolute_id) + ', '
s = f'PU {self.id}\n' + \
f'\t{pe_str}\n' + \
f'\t{self.pegb.__str__()}'
return s
def buffer_sizes(self):
sizes = {}
for pe in self.pes:
sizes[f'PE{pe.relative_id}'] = pe.buffer_sizes()
sizes[f'PEGB'] = int(self.pegb.new_data_written)
return sizes
# Probably won't be used much
def load_instructions_to_pe(self, pe_id_relative, instructions):
pe = self.pes[pe_id_relative]
pe.load_instructions(instructions)
def run_one_cycle(self):
# if self.debug:
# print(f'Cycle {self.cycle}')
self.accessed = False
# Dictionary to hold access counts for each on-chip memory component in this PE
# Example format: {"PE0": pe access stats dictionary...}
# Use PE absolute id for this
pu_access_stats = {}
self.bus_arbiter.run_one_cycle()
self.accessed = self.bus_arbiter.accessed
for pe in self.pes:
if self.debug:
print(f'PE {pe.relative_id}')
pe_access_stats = pe.run_one_cycle()
pu_access_stats[f'PE_{pe.absolute_id}'] = pe_access_stats
if self.debug:
if pe.done_processing:
print(f'\tPE {pe.relative_id}: DONE PROCESSING')
else:
program_counter = pe.program_counter
num_insts = pe.instruction_memory.num_instructions
progress_percentage = int(program_counter / num_insts * 100)
print(f'\tPE {pe.relative_id} PC: {program_counter} out of {num_insts} total ({progress_percentage} %)')
self.accessed = self.accessed or pe.accessed
if self.debug:
print()
if self.done_processing:
print(f'\t*** PU {self.id} DONE PROCESSING ***')
elif self.accessed is False:
print(f'\t*** PU {self.id}: Nothing happened in cycle {self.cycle} ***')
self.cycle += 1
if self.debug:
print()
return pu_access_stats
def run_cycles(self, cycles):
for i in range(cycles):
self.run_one_cycle()
@property
def done_processing(self):
"""
        Return True if all PEs in this PU have completed processing all instructions.
"""
status = True
for pe in self.pes:
if not pe.done_processing:
# if self.debug:
# print(f'\tPE {pe.absolute_id} did not complete processing all insts')
return False
return status
def set_punb(self, punb):
"""
Set the PUNB of Head PE.
"""
self.head_pe.set_punb(punb)
# TODO (Not important) use this in write_to_pu_read_buffer() function and test it
@property
def pugb_read_buffer(self):
return self.head_pe.pugb_read_buffer
# TODO (Not important) use this in read_from_pu_write_buffer() function and test it
@property
def pugb_write_buffer(self):
return self.head_pe.pugb_write_buffer
if __name__ == '__main__':
pu = PU(1, 8)
print(pu)
pe = pu.pes[1]
print(pe)
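    # Illustrative next steps (sketch; assumes instruction objects built
    # elsewhere in the simulator and loaded before stepping the PU):
    # pu.load_instructions_to_pe(0, instructions)
    # pu.run_cycles(100)
    # print(pu.buffer_sizes())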
| 31.887097
| 148
| 0.588771
|
01b75982886769bd5d645ae0ccd934107bff44d0
| 3,062
|
py
|
Python
|
aliyun-python-sdk-polardb/aliyunsdkpolardb/request/v20170801/CreateAccountRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-polardb/aliyunsdkpolardb/request/v20170801/CreateAccountRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-polardb/aliyunsdkpolardb/request/v20170801/CreateAccountRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateAccountRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'polardb', '2017-08-01', 'CreateAccount','polardb')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_DBClusterId(self):
return self.get_query_params().get('DBClusterId')
def set_DBClusterId(self,DBClusterId):
self.add_query_param('DBClusterId',DBClusterId)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_AccountType(self):
return self.get_query_params().get('AccountType')
def set_AccountType(self,AccountType):
self.add_query_param('AccountType',AccountType)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_AccountDescription(self):
return self.get_query_params().get('AccountDescription')
def set_AccountDescription(self,AccountDescription):
self.add_query_param('AccountDescription',AccountDescription)
def get_AccountPrivilege(self):
return self.get_query_params().get('AccountPrivilege')
def set_AccountPrivilege(self,AccountPrivilege):
self.add_query_param('AccountPrivilege',AccountPrivilege)
def get_AccountPassword(self):
return self.get_query_params().get('AccountPassword')
def set_AccountPassword(self,AccountPassword):
self.add_query_param('AccountPassword',AccountPassword)
def get_AccountName(self):
return self.get_query_params().get('AccountName')
def set_AccountName(self,AccountName):
self.add_query_param('AccountName',AccountName)
def get_DBName(self):
return self.get_query_params().get('DBName')
def set_DBName(self,DBName):
self.add_query_param('DBName',DBName)
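# --- Illustrative usage (sketch, not part of the original module) ---
# AcsClient and do_action_with_exception come from aliyunsdkcore; the credential,
# region and cluster values below are placeholders only.
if __name__ == '__main__':
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = CreateAccountRequest()
    request.set_DBClusterId('pc-xxxxxxxxxxxxxxxx')
    request.set_AccountName('demo_account')
    request.set_AccountPassword('<strong-password>')
    response = client.do_action_with_exception(request)
    print(response)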
| 34.022222
| 80
| 0.775963
|
7e688f50c310fd3ce74bede2c94b02e5f65457f7
| 587
|
py
|
Python
|
portafolio/core/migrations/0011_auto_20200805_2059.py
|
jhonfmg7/portafolioDjango
|
64db6a371a84dcad4f22dd7cdeb598c7c2db124e
|
[
"Apache-2.0"
] | null | null | null |
portafolio/core/migrations/0011_auto_20200805_2059.py
|
jhonfmg7/portafolioDjango
|
64db6a371a84dcad4f22dd7cdeb598c7c2db124e
|
[
"Apache-2.0"
] | null | null | null |
portafolio/core/migrations/0011_auto_20200805_2059.py
|
jhonfmg7/portafolioDjango
|
64db6a371a84dcad4f22dd7cdeb598c7c2db124e
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-08-05 20:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0010_choose'),
]
operations = [
migrations.AlterModelOptions(
name='choose',
options={'verbose_name': 'Elección', 'verbose_name_plural': 'Elecciones'},
),
migrations.AddField(
model_name='video',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='video', verbose_name='Imagen video'),
),
]
| 25.521739
| 107
| 0.592845
|
d5bd8f9a6dc790e0cebf906d0a89d51949efdbbd
| 11,259
|
py
|
Python
|
config/settings/base.py
|
Dimercel/feedz
|
d1f0ab1558b6df63452d1ac12847e3e816c83c31
|
[
"MIT"
] | null | null | null |
config/settings/base.py
|
Dimercel/feedz
|
d1f0ab1558b6df63452d1ac12847e3e816c83c31
|
[
"MIT"
] | null | null | null |
config/settings/base.py
|
Dimercel/feedz
|
d1f0ab1558b6df63452d1ac12847e3e816c83c31
|
[
"MIT"
] | null | null | null |
"""
Base settings to build other settings files upon.
"""
from datetime import timedelta
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# feedz/
APPS_DIR = ROOT_DIR / "feedz"
env = environ.Env()
# READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
# if READ_DOT_ENV_FILE:
# # OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / "local.env"))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "Europe/Moscow"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "ru-RU"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
"default": env.db("DATABASE_URL", default="postgres:///feedz")
}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"allauth",
"allauth.account",
"allauth.socialaccount",
]
LOCAL_APPS = [
"feedz.users.apps.UsersConfig",
"feedz.aggregator",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "feedz.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR / "static"),
str(ROOT_DIR / "node_modules")
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"feedz.utils.context_processors.settings_context",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""Ito Dimercel""", "xolcman@gmail.com")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "feedz.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "feedz.users.adapters.SocialAccountAdapter"
# Application settings
# ------------------------------------------------------------------------------
# NOTE: this overrides the "bootstrap4" value assigned earlier in this file.
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# App specific settings
# ------------------------------------------------------------------------------
BOT_USER_AGENT = env('BOT_USER_AGENT')
MIN_SYNC_TIME_DELTA = timedelta(hours=1)
| 39.644366
| 93
| 0.629363
|
9f68312a6499a1fbd9410df34a1ef90165da765d
| 2,580
|
py
|
Python
|
app/nlp_v2/bulk_predict.py
|
s2t2/tweet-analyzer-py
|
0a398fc47101a2d602d8c4116c970f1076a58f27
|
[
"MIT"
] | 5
|
2020-04-02T12:03:57.000Z
|
2020-10-18T19:29:15.000Z
|
app/nlp_v2/bulk_predict.py
|
s2t2/tweet-analyzer-py
|
0a398fc47101a2d602d8c4116c970f1076a58f27
|
[
"MIT"
] | 22
|
2020-03-31T02:00:34.000Z
|
2021-06-30T17:59:01.000Z
|
app/nlp_v2/bulk_predict.py
|
s2t2/tweet-analyzer-py
|
0a398fc47101a2d602d8c4116c970f1076a58f27
|
[
"MIT"
] | 3
|
2020-04-04T16:08:08.000Z
|
2020-10-20T01:32:46.000Z
|
import os
from pandas import DataFrame, read_csv
from app import seek_confirmation, DATA_DIR
from app.job import Job
from app.bq_service import BigQueryService
from app.nlp.model_storage import ModelStorage
LIMIT = os.getenv("LIMIT")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="100000"))
CSV_FILEPATH = os.path.join(DATA_DIR, "nlp_v2", "all_statuses.csv")
DESTRUCTIVE = (os.getenv("DESTRUCTIVE", default="false") == "true") # whether or not to re-download if a local file already exists
def save_batch(batch, csv_filepath=CSV_FILEPATH):
batch_df = DataFrame(batch, columns=["status_id", "status_text"])
if os.path.isfile(csv_filepath):
batch_df.to_csv(csv_filepath, mode="a", header=False, index=False)
else:
batch_df.to_csv(csv_filepath, index=False)
if __name__ == "__main__":
bq_service = BigQueryService()
job = Job()
if DESTRUCTIVE or not os.path.isfile(CSV_FILEPATH):
job.start()
batch = []
for row in bq_service.nlp_v2_fetch_statuses(limit=LIMIT):
batch.append({"status_id": row["status_id"], "status_text": row["status_text"]})
job.counter += 1
if job.counter % BATCH_SIZE == 0:
save_batch(batch)
batch = []
job.progress_report()
if len(batch) > 0:
save_batch(batch)
batch = []
job.end()
seek_confirmation()
#exit()
for model_name in ["logistic_regression", "multinomial_nb"]:
storage = ModelStorage(dirpath=f"nlp_v2/models/best/{model_name}")
tv = storage.load_vectorizer()
clf = storage.load_model()
print(f"DESTROY PREDICTIONS TABLE? ({model_name})")
seek_confirmation()
bq_service.nlp_v2_destructively_migrate_predictions_table(model_name)
predictions_table = bq_service.nlp_v2_get_predictions_table(model_name) # API call. cache it here once.
job.start()
for chunk_df in read_csv(CSV_FILEPATH, chunksize=BATCH_SIZE): # FYI: this will include the last chunk even if it is not a full batch
status_ids = chunk_df["status_id"].tolist()
status_texts = chunk_df["status_text"].tolist()
preds = clf.predict(tv.transform(status_texts))
batch = [{"status_id": status_id, "prediction": pred} for status_id, pred in zip(status_ids, preds)]
bq_service.insert_records_in_batches(predictions_table, batch)
job.counter += len(chunk_df)
job.progress_report()
batch = []
job.end()
| 33.947368
| 140
| 0.654651
|
ad4a625aa0bdf56bb8c5082a559dc4b1a5b97e2f
| 10,790
|
gyp
|
Python
|
ThirdParty/webrtc/src/webrtc/voice_engine/voice_engine.gyp
|
JokeJoe8806/licode-windows
|
2bfdaf6e87669df2b9960da50c6800bc3621b80b
|
[
"MIT"
] | 8
|
2018-12-27T14:57:13.000Z
|
2021-04-07T07:03:15.000Z
|
ThirdParty/webrtc/src/webrtc/voice_engine/voice_engine.gyp
|
JokeJoe8806/licode-windows
|
2bfdaf6e87669df2b9960da50c6800bc3621b80b
|
[
"MIT"
] | 1
|
2019-03-13T01:35:03.000Z
|
2020-10-08T04:13:04.000Z
|
ThirdParty/webrtc/src/webrtc/voice_engine/voice_engine.gyp
|
JokeJoe8806/licode-windows
|
2bfdaf6e87669df2b9960da50c6800bc3621b80b
|
[
"MIT"
] | 9
|
2018-12-28T11:45:12.000Z
|
2021-05-11T02:15:31.000Z
|
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'includes': [
'../build/common.gypi',
],
'targets': [
{
'target_name': 'voice_engine',
'type': 'static_library',
'dependencies': [
'<(webrtc_root)/common.gyp:webrtc_common',
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
'<(webrtc_root)/modules/modules.gyp:audio_coding_module',
'<(webrtc_root)/modules/modules.gyp:audio_conference_mixer',
'<(webrtc_root)/modules/modules.gyp:audio_device',
'<(webrtc_root)/modules/modules.gyp:audio_processing',
'<(webrtc_root)/modules/modules.gyp:bitrate_controller',
'<(webrtc_root)/modules/modules.gyp:media_file',
'<(webrtc_root)/modules/modules.gyp:rtp_rtcp',
'<(webrtc_root)/modules/modules.gyp:webrtc_utility',
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
],
'sources': [
'include/voe_audio_processing.h',
'include/voe_base.h',
'include/voe_codec.h',
'include/voe_dtmf.h',
'include/voe_errors.h',
'include/voe_external_media.h',
'include/voe_file.h',
'include/voe_hardware.h',
'include/voe_neteq_stats.h',
'include/voe_network.h',
'include/voe_rtp_rtcp.h',
'include/voe_video_sync.h',
'include/voe_volume_control.h',
'channel.cc',
'channel.h',
'channel_manager.cc',
'channel_manager.h',
'dtmf_inband.cc',
'dtmf_inband.h',
'dtmf_inband_queue.cc',
'dtmf_inband_queue.h',
'level_indicator.cc',
'level_indicator.h',
'monitor_module.cc',
'monitor_module.h',
'network_predictor.cc',
'network_predictor.h',
'output_mixer.cc',
'output_mixer.h',
'shared_data.cc',
'shared_data.h',
'statistics.cc',
'statistics.h',
'transmit_mixer.cc',
'transmit_mixer.h',
'utility.cc',
'utility.h',
'voe_audio_processing_impl.cc',
'voe_audio_processing_impl.h',
'voe_base_impl.cc',
'voe_base_impl.h',
'voe_codec_impl.cc',
'voe_codec_impl.h',
'voe_dtmf_impl.cc',
'voe_dtmf_impl.h',
'voe_external_media_impl.cc',
'voe_external_media_impl.h',
'voe_file_impl.cc',
'voe_file_impl.h',
'voe_hardware_impl.cc',
'voe_hardware_impl.h',
'voe_neteq_stats_impl.cc',
'voe_neteq_stats_impl.h',
'voe_network_impl.cc',
'voe_network_impl.h',
'voe_rtp_rtcp_impl.cc',
'voe_rtp_rtcp_impl.h',
'voe_video_sync_impl.cc',
'voe_video_sync_impl.h',
'voe_volume_control_impl.cc',
'voe_volume_control_impl.h',
'voice_engine_defines.h',
'voice_engine_impl.cc',
'voice_engine_impl.h',
],
},
],
'conditions': [
['OS=="win"', {
'defines': ['WEBRTC_DRIFT_COMPENSATION_SUPPORTED',],
}],
['include_tests==1', {
'targets': [
{
'target_name': 'voice_engine_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
'voice_engine',
'<(DEPTH)/testing/gmock.gyp:gmock',
'<(DEPTH)/testing/gtest.gyp:gtest',
# The rest are to satisfy the unittests' include chain.
# This would be unnecessary if we used qualified includes.
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
'<(webrtc_root)/modules/modules.gyp:audio_device',
'<(webrtc_root)/modules/modules.gyp:audio_processing',
'<(webrtc_root)/modules/modules.gyp:audio_coding_module',
'<(webrtc_root)/modules/modules.gyp:audio_conference_mixer',
'<(webrtc_root)/modules/modules.gyp:media_file',
'<(webrtc_root)/modules/modules.gyp:rtp_rtcp',
'<(webrtc_root)/modules/modules.gyp:webrtc_utility',
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
'<(webrtc_root)/test/test.gyp:test_support_main',
],
'sources': [
'channel_unittest.cc',
'network_predictor_unittest.cc',
'transmit_mixer_unittest.cc',
'utility_unittest.cc',
'voe_audio_processing_unittest.cc',
'voe_base_unittest.cc',
'voe_codec_unittest.cc',
'voe_network_unittest.cc',
'voice_engine_fixture.cc',
'voice_engine_fixture.h',
],
'conditions': [
['OS=="android"', {
'dependencies': [
'<(DEPTH)/testing/android/native_test.gyp:native_test_native_code',
],
}],
],
},
{
'target_name': 'voe_auto_test',
'type': 'executable',
'dependencies': [
'voice_engine',
'<(DEPTH)/testing/gmock.gyp:gmock',
'<(DEPTH)/testing/gtest.gyp:gtest',
'<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers_default',
'<(webrtc_root)/test/test.gyp:channel_transport',
'<(webrtc_root)/test/test.gyp:test_support',
],
'sources': [
'test/auto_test/automated_mode.cc',
'test/auto_test/extended/agc_config_test.cc',
'test/auto_test/extended/ec_metrics_test.cc',
'test/auto_test/fakes/conference_transport.cc',
'test/auto_test/fakes/conference_transport.h',
'test/auto_test/fakes/fake_external_transport.cc',
'test/auto_test/fakes/fake_external_transport.h',
'test/auto_test/fixtures/after_initialization_fixture.cc',
'test/auto_test/fixtures/after_initialization_fixture.h',
'test/auto_test/fixtures/after_streaming_fixture.cc',
'test/auto_test/fixtures/after_streaming_fixture.h',
'test/auto_test/fixtures/before_initialization_fixture.cc',
'test/auto_test/fixtures/before_initialization_fixture.h',
'test/auto_test/fixtures/before_streaming_fixture.cc',
'test/auto_test/fixtures/before_streaming_fixture.h',
'test/auto_test/standard/audio_processing_test.cc',
'test/auto_test/standard/codec_before_streaming_test.cc',
'test/auto_test/standard/codec_test.cc',
'test/auto_test/standard/dtmf_test.cc',
'test/auto_test/standard/external_media_test.cc',
'test/auto_test/standard/file_before_streaming_test.cc',
'test/auto_test/standard/file_test.cc',
'test/auto_test/standard/hardware_before_initializing_test.cc',
'test/auto_test/standard/hardware_before_streaming_test.cc',
'test/auto_test/standard/hardware_test.cc',
'test/auto_test/standard/mixing_test.cc',
'test/auto_test/standard/neteq_stats_test.cc',
'test/auto_test/standard/rtp_rtcp_before_streaming_test.cc',
'test/auto_test/standard/rtp_rtcp_extensions.cc',
'test/auto_test/standard/rtp_rtcp_test.cc',
'test/auto_test/standard/voe_base_misc_test.cc',
'test/auto_test/standard/video_sync_test.cc',
'test/auto_test/standard/volume_test.cc',
'test/auto_test/resource_manager.cc',
'test/auto_test/voe_conference_test.cc',
'test/auto_test/voe_cpu_test.cc',
'test/auto_test/voe_cpu_test.h',
'test/auto_test/voe_standard_test.cc',
'test/auto_test/voe_standard_test.h',
'test/auto_test/voe_stress_test.cc',
'test/auto_test/voe_stress_test.h',
'test/auto_test/voe_test_defines.h',
'test/auto_test/voe_test_interface.h',
],
'conditions': [
['OS=="android"', {
# some tests are not supported on android yet, exclude these tests.
'sources!': [
'test/auto_test/standard/hardware_before_streaming_test.cc',
],
}],
],
# Disable warnings to enable Win64 build, issue 1323.
'msvs_disabled_warnings': [
4267, # size_t to int truncation.
],
},
{
# command line test that should work on linux/mac/win
'target_name': 'voe_cmd_test',
'type': 'executable',
'dependencies': [
'voice_engine',
'<(DEPTH)/testing/gtest.gyp:gtest',
'<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers_default',
'<(webrtc_root)/test/test.gyp:channel_transport',
'<(webrtc_root)/test/test.gyp:test_support',
],
'sources': [
'test/cmd_test/voe_cmd_test.cc',
],
},
], # targets
'conditions': [
['OS=="android"', {
'targets': [
{
'target_name': 'voice_engine_unittests_apk_target',
'type': 'none',
'dependencies': [
'<(apk_tests_path):voice_engine_unittests_apk',
],
},
],
}],
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'voice_engine_unittests_run',
'type': 'none',
'dependencies': [
'voice_engine_unittests',
],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'voice_engine_unittests.isolate',
],
},
{
'target_name': 'voe_auto_test_run',
'type': 'none',
'dependencies': [
'voe_auto_test',
],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'voe_auto_test.isolate',
],
},
],
}],
], # conditions
}], # include_tests
], # conditions
}
| 38.673835
| 89
| 0.573401
|
8644d354b4b477854096a57ec7de1118e66f6672
| 9,785
|
py
|
Python
|
tests/unit/config_test.py
|
btaitelb/compose
|
43369cda9ce5f0fe4860c7160cfa6550d31e2417
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/config_test.py
|
btaitelb/compose
|
43369cda9ce5f0fe4860c7160cfa6550d31e2417
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/config_test.py
|
btaitelb/compose
|
43369cda9ce5f0fe4860c7160cfa6550d31e2417
|
[
"Apache-2.0"
] | null | null | null |
import os
import mock
from .. import unittest
from compose import config
class ConfigTest(unittest.TestCase):
def test_from_dictionary(self):
service_dicts = config.from_dictionary({
'foo': {'image': 'busybox'},
'bar': {'environment': ['FOO=1']},
})
self.assertEqual(
sorted(service_dicts, key=lambda d: d['name']),
sorted([
{
'name': 'bar',
'environment': {'FOO': '1'},
},
{
'name': 'foo',
'image': 'busybox',
}
])
)
def test_from_dictionary_throws_error_when_not_dict(self):
with self.assertRaises(config.ConfigurationError):
config.from_dictionary({
'web': 'busybox:latest',
})
def test_config_validation(self):
self.assertRaises(
config.ConfigurationError,
lambda: config.make_service_dict('foo', {'port': ['8000']})
)
config.make_service_dict('foo', {'ports': ['8000']})
class MergeTest(unittest.TestCase):
def test_merge_volumes_empty(self):
service_dict = config.merge_service_dicts({}, {})
self.assertNotIn('volumes', service_dict)
def test_merge_volumes_no_override(self):
service_dict = config.merge_service_dicts(
{'volumes': ['/foo:/code', '/data']},
{},
)
self.assertEqual(set(service_dict['volumes']), set(['/foo:/code', '/data']))
def test_merge_volumes_no_base(self):
service_dict = config.merge_service_dicts(
{},
{'volumes': ['/bar:/code']},
)
self.assertEqual(set(service_dict['volumes']), set(['/bar:/code']))
def test_merge_volumes_override_explicit_path(self):
service_dict = config.merge_service_dicts(
{'volumes': ['/foo:/code', '/data']},
{'volumes': ['/bar:/code']},
)
self.assertEqual(set(service_dict['volumes']), set(['/bar:/code', '/data']))
def test_merge_volumes_add_explicit_path(self):
service_dict = config.merge_service_dicts(
{'volumes': ['/foo:/code', '/data']},
{'volumes': ['/bar:/code', '/quux:/data']},
)
self.assertEqual(set(service_dict['volumes']), set(['/bar:/code', '/quux:/data']))
def test_merge_volumes_remove_explicit_path(self):
service_dict = config.merge_service_dicts(
{'volumes': ['/foo:/code', '/quux:/data']},
{'volumes': ['/bar:/code', '/data']},
)
self.assertEqual(set(service_dict['volumes']), set(['/bar:/code', '/data']))
class EnvTest(unittest.TestCase):
def test_parse_environment_as_list(self):
environment = [
'NORMAL=F1',
'CONTAINS_EQUALS=F=2',
'TRAILING_EQUALS=',
]
self.assertEqual(
config.parse_environment(environment),
{'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''},
)
def test_parse_environment_as_dict(self):
environment = {
'NORMAL': 'F1',
'CONTAINS_EQUALS': 'F=2',
'TRAILING_EQUALS': None,
}
self.assertEqual(config.parse_environment(environment), environment)
def test_parse_environment_invalid(self):
with self.assertRaises(config.ConfigurationError):
config.parse_environment('a=b')
def test_parse_environment_empty(self):
self.assertEqual(config.parse_environment(None), {})
@mock.patch.dict(os.environ)
def test_resolve_environment(self):
os.environ['FILE_DEF'] = 'E1'
os.environ['FILE_DEF_EMPTY'] = 'E2'
os.environ['ENV_DEF'] = 'E3'
service_dict = config.make_service_dict(
'foo', {
'environment': {
'FILE_DEF': 'F1',
'FILE_DEF_EMPTY': '',
'ENV_DEF': None,
'NO_DEF': None
},
},
)
self.assertEqual(
service_dict['environment'],
{'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': ''},
)
def test_env_from_file(self):
service_dict = config.make_service_dict(
'foo',
{'env_file': 'one.env'},
'tests/fixtures/env',
)
self.assertEqual(
service_dict['environment'],
{'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'bar'},
)
def test_env_from_multiple_files(self):
service_dict = config.make_service_dict(
'foo',
{'env_file': ['one.env', 'two.env']},
'tests/fixtures/env',
)
self.assertEqual(
service_dict['environment'],
{'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'},
)
def test_env_nonexistent_file(self):
options = {'env_file': 'nonexistent.env'}
self.assertRaises(
config.ConfigurationError,
lambda: config.make_service_dict('foo', options, 'tests/fixtures/env'),
)
@mock.patch.dict(os.environ)
def test_resolve_environment_from_file(self):
os.environ['FILE_DEF'] = 'E1'
os.environ['FILE_DEF_EMPTY'] = 'E2'
os.environ['ENV_DEF'] = 'E3'
service_dict = config.make_service_dict(
'foo',
{'env_file': 'resolve.env'},
'tests/fixtures/env',
)
self.assertEqual(
service_dict['environment'],
{'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': ''},
)
class ExtendsTest(unittest.TestCase):
def test_extends(self):
service_dicts = config.load('tests/fixtures/extends/docker-compose.yml')
service_dicts = sorted(
service_dicts,
key=lambda sd: sd['name'],
)
self.assertEqual(service_dicts, [
{
'name': 'mydb',
'image': 'busybox',
'command': 'sleep 300',
},
{
'name': 'myweb',
'image': 'busybox',
'command': 'sleep 300',
'links': ['mydb:db'],
'environment': {
"FOO": "1",
"BAR": "2",
"BAZ": "2",
},
}
])
def test_nested(self):
service_dicts = config.load('tests/fixtures/extends/nested.yml')
self.assertEqual(service_dicts, [
{
'name': 'myweb',
'image': 'busybox',
'command': '/bin/true',
'environment': {
"FOO": "2",
"BAR": "2",
},
},
])
def test_circular(self):
try:
config.load('tests/fixtures/extends/circle-1.yml')
raise Exception("Expected config.CircularReference to be raised")
except config.CircularReference as e:
self.assertEqual(
[(os.path.basename(filename), service_name) for (filename, service_name) in e.trail],
[
('circle-1.yml', 'web'),
('circle-2.yml', 'web'),
('circle-1.yml', 'web'),
],
)
def test_extends_validation(self):
dictionary = {'extends': None}
def load_config():
return config.make_service_dict('myweb', dictionary, working_dir='tests/fixtures/extends')
self.assertRaisesRegexp(config.ConfigurationError, 'dictionary', load_config)
dictionary['extends'] = {}
self.assertRaises(config.ConfigurationError, load_config)
dictionary['extends']['file'] = 'common.yml'
self.assertRaisesRegexp(config.ConfigurationError, 'service', load_config)
dictionary['extends']['service'] = 'web'
self.assertIsInstance(load_config(), dict)
dictionary['extends']['what'] = 'is this'
self.assertRaisesRegexp(config.ConfigurationError, 'what', load_config)
def test_blacklisted_options(self):
def load_config():
return config.make_service_dict('myweb', {
'extends': {
'file': 'whatever',
'service': 'web',
}
}, '.')
with self.assertRaisesRegexp(config.ConfigurationError, 'links'):
other_config = {'web': {'links': ['db']}}
with mock.patch.object(config, 'load_yaml', return_value=other_config):
print load_config()
with self.assertRaisesRegexp(config.ConfigurationError, 'volumes_from'):
other_config = {'web': {'volumes_from': ['db']}}
with mock.patch.object(config, 'load_yaml', return_value=other_config):
print load_config()
with self.assertRaisesRegexp(config.ConfigurationError, 'net'):
other_config = {'web': {'net': 'container:db'}}
with mock.patch.object(config, 'load_yaml', return_value=other_config):
print load_config()
other_config = {'web': {'net': 'host'}}
with mock.patch.object(config, 'load_yaml', return_value=other_config):
print load_config()
def test_volume_path(self):
dicts = config.load('tests/fixtures/volume-path/docker-compose.yml')
paths = [
'%s:/foo' % os.path.abspath('tests/fixtures/volume-path/common/foo'),
'%s:/bar' % os.path.abspath('tests/fixtures/volume-path/bar'),
]
self.assertEqual(set(dicts[0]['volumes']), set(paths))
| 33.057432
| 102
| 0.529688
|
d1d75a411667690e14f6c98857abdac4851dab9b
| 2,430
|
py
|
Python
|
lib/pycoin/tests/parse_block_test.py
|
AYCHDO/Dominus
|
d7065816febafb6cf0fb1142ff7dc7e454c835ad
|
[
"MIT"
] | 68
|
2015-08-30T13:23:12.000Z
|
2022-02-26T06:59:15.000Z
|
lib/pycoin/tests/parse_block_test.py
|
AYCHDO/Dominus
|
d7065816febafb6cf0fb1142ff7dc7e454c835ad
|
[
"MIT"
] | 44
|
2016-02-14T02:08:00.000Z
|
2020-11-11T09:03:55.000Z
|
lib/pycoin/tests/parse_block_test.py
|
AYCHDO/Dominus
|
d7065816febafb6cf0fb1142ff7dc7e454c835ad
|
[
"MIT"
] | 56
|
2015-08-26T12:49:20.000Z
|
2022-02-17T19:06:36.000Z
|
#!/usr/bin/env python
import io
import unittest
from pycoin.block import Block
from pycoin.serialize import b2h_rev, h2b
class BlockTest(unittest.TestCase):
def test_block(self):
expected_checksum = '0000000000089F7910F6755C10EA2795EC368A29B435D80770AD78493A6FECF1'.lower()
block_data = h2b('010000007480150B299A16BBCE5CCDB1D1BBC65CFC5893B01E6619107C55200000000000790'\
'0A2B203D24C69710AB6A94BEB937E1B1ADD64C2327E268D8C3E5F8B41DBED8796974CED66471B204C3247030'\
'1000000010000000000000000000000000000000000000000000000000000000000000000FFFFFFFF0804ED6'\
'6471B024001FFFFFFFF0100F2052A010000004341045FEE68BAB9915C4EDCA4C680420ED28BBC369ED84D48A'\
'C178E1F5F7EEAC455BBE270DABA06802145854B5E29F0A7F816E2DF906E0FE4F6D5B4C9B92940E4F0EDAC000'\
'000000100000001F7B30415D1A7BF6DB91CB2A272767C6799D721A4178AA328E0D77C199CB3B57F010000008'\
'A4730440220556F61B84F16E637836D2E74B8CB784DE40C28FE3EF93CCB7406504EE9C7CAA5022043BD4749D'\
'4F3F7F831AC696748AD8D8E79AEB4A1C539E742AA3256910FC88E170141049A414D94345712893A828DE57B4C'\
'2054E2F596CDCA9D0B4451BA1CA5F8847830B9BE6E196450E6ABB21C540EA31BE310271AA00A49ED0BA930743'\
'D1ED465BAD0FFFFFFFF0200E1F505000000001976A914529A63393D63E980ACE6FA885C5A89E4F27AA08988AC'\
'C0ADA41A000000001976A9145D17976537F308865ED533CCCFDD76558CA3C8F088AC000000000100000001651'\
'48D894D3922EF5FFDA962BE26016635C933D470C8B0AB7618E869E3F70E3C000000008B48304502207F5779EB'\
'F4834FEAEFF4D250898324EB5C0833B16D7AF4C1CB0F66F50FCF6E85022100B78A65377FD018281E77285EFC3'\
'1E5B9BA7CB7E20E015CF6B7FA3E4A466DD195014104072AD79E0AA38C05FA33DD185F84C17F611E58A8658CE'\
'996D8B04395B99C7BE36529CAB7606900A0CD5A7AEBC6B233EA8E0FE60943054C63620E05E5B85F0426FFFFF'\
'FFF02404B4C00000000001976A914D4CAA8447532CA8EE4C80A1AE1D230A01E22BFDB88AC8013A0DE0100000'\
'01976A9149661A79AE1F6D487AF3420C13E649D6DF3747FC288AC00000000')
# try to parse a block
block = Block.parse(io.BytesIO(block_data))
print(block)
assert b2h_rev(block.hash()) == expected_checksum
for tx in block.txs:
print(tx)
for t in tx.txs_in:
print(" %s" % t)
for t in tx.txs_out:
print(" %s" % t)
block.check_merkle_hash()
def main():
unittest.main()
if __name__ == "__main__":
main()
| 46.730769
| 103
| 0.795473
|
42f06361a6a2115a42a1e83547ec4c64b12fa35b
| 3,293
|
py
|
Python
|
bitjam/__init__.py
|
jalvz/bitjam
|
3abc7839b8552e1148f77a9c566c5b4b8e7fcd44
|
[
"MIT"
] | null | null | null |
bitjam/__init__.py
|
jalvz/bitjam
|
3abc7839b8552e1148f77a9c566c5b4b8e7fcd44
|
[
"MIT"
] | null | null | null |
bitjam/__init__.py
|
jalvz/bitjam
|
3abc7839b8552e1148f77a9c566c5b4b8e7fcd44
|
[
"MIT"
] | null | null | null |
import threading
class CongestionMonitor(object):
def __init__(self, capacity, duration=60):
"""
Monitor to detect traffic congestion
:param capacity: saturation point; i.e. how many `allows(obj)` operations
are allowed per time interval
:param duration: time interval; i.e. number of seconds after which
an inserted object is forgotten
"""
self.duration = duration
self.capacity = capacity
self._insertions = {}
self._rejections = {}
self._lock = threading.Lock()
def allows(self, obj):
"""
Increases the number of occurrences for its argument
:param obj: any object of any type
:return: True if the CongestionMonitor allows the argument,
False if the CongestionMonitor is saturated and rejects the argument
"""
with self._lock:
c = self._insertions.get(obj, 0)
if c == self.capacity:
self._rejections[obj] = self._rejections.get(obj, 0) + 1
threading.Timer(
self.duration,
lambda: self._del_decr(self._rejections, obj)).start()
return False
else:
self._insertions[obj] = c + 1
threading.Timer(
self.duration,
lambda: self._del_decr(self._insertions, obj)).start()
return True
def count_allowed(self, obj=None):
"""
Counts the number of times for which `allows(obj)` returned True
:param obj: any object of any type
If None, will count the number of different objects allowed
:return: The number of times `obj` has been inserted in the last
`self.duration` seconds
"""
return self._count(self._insertions, obj)
def count_rejected(self, obj=None):
"""
Counts the number of times for which `allows(obj)` returned False
:param obj: any object of any type
If None, will count the number of different objects rejected
:return: The number of times `obj` has been rejected in the last
`self.duration` seconds
"""
return self._count(self._rejections, obj)
def count_observed(self, obj=None):
"""
Counts the number of times for which `allows(obj)` was called
:param obj: any object of any type
:return: The number of times `obj` has been observed in the last
`self.duration` seconds
"""
return self._count(self._rejections, obj) + \
self._count(self._insertions, obj)
def is_saturated(self, obj):
"""
Checks whether `obj` has reached capacity, without performing side effects
:param obj: any object of any type
:return: True if `obj` is saturated, i.e. a call to `allows(obj)` would
currently return False
"""
return self.count_allowed(obj) == self.capacity
def _count(self, d, k=None):
with self._lock:
if k is None:
return len(d.keys())
else:
return d.get(k, 0)
def _del_decr(self, d, k):
with self._lock:
v = d[k]
if v == 1:
del d[k]
else:
d[k] = v - 1
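# Minimal usage sketch (illustrative addition, not part of the original module).
# It exercises the behaviour documented above: at most `capacity` calls to
# allows(obj) succeed per `duration`-second window; further calls are rejected
# and counted. The object name "client-a" is an arbitrary example value.
if __name__ == "__main__":
    monitor = CongestionMonitor(capacity=2, duration=1)
    print(monitor.allows("client-a"))        # True  (1st event)
    print(monitor.allows("client-a"))        # True  (2nd event, reaches capacity)
    print(monitor.allows("client-a"))        # False (saturated, counted as a rejection)
    print(monitor.is_saturated("client-a"))  # True
    print(monitor.count_rejected("client-a"))  # 1
    # The pending threading.Timer instances expire after ~1 second, after which
    # the counts are forgotten and the process can exit.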
| 35.031915
| 80
| 0.568175
|
6634590cee5bdcf7fd53f6393946c601c7e242ce
| 2,039
|
py
|
Python
|
venv/Lib/site-packages/sklearn/decomposition/__init__.py
|
Jos33y/student-performance-knn
|
4e965434f52dd6a1380904aa257df1edfaebb3c4
|
[
"MIT"
] | 2
|
2021-05-02T07:59:56.000Z
|
2021-12-14T19:53:13.000Z
|
venv/Lib/site-packages/sklearn/decomposition/__init__.py
|
Jos33y/student-performance-knn
|
4e965434f52dd6a1380904aa257df1edfaebb3c4
|
[
"MIT"
] | 7
|
2021-06-08T21:46:24.000Z
|
2022-03-12T00:35:31.000Z
|
my_env/Lib/site-packages/sklearn/decomposition/__init__.py
|
obulrdy6881/Drowsinss
|
61cb9281d7dd22aee282b517e2fbf500f0ff9935
|
[
"MIT"
] | 1
|
2021-05-02T07:59:59.000Z
|
2021-05-02T07:59:59.000Z
|
"""
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
# TODO: remove me in 0.24 (as well as the noqa markers) and
# import the dict_learning func directly from the ._dict_learning
# module instead.
# Pre-cache the import of the deprecated module so that import
# sklearn.decomposition.dict_learning returns the function as in
# 0.21, instead of the module.
# https://github.com/scikit-learn/scikit-learn/issues/15842
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=FutureWarning)
from .dict_learning import dict_learning
from ._nmf import NMF, non_negative_factorization # noqa
from ._pca import PCA # noqa
from ._incremental_pca import IncrementalPCA # noqa
from ._kernel_pca import KernelPCA # noqa
from ._sparse_pca import SparsePCA, MiniBatchSparsePCA # noqa
from ._truncated_svd import TruncatedSVD # noqa
from ._fastica import FastICA, fastica # noqa
from ._dict_learning import (dict_learning_online,
sparse_encode, DictionaryLearning,
MiniBatchDictionaryLearning, SparseCoder) # noqa
from ._factor_analysis import FactorAnalysis # noqa
from ..utils.extmath import randomized_svd # noqa
from ._lda import LatentDirichletAllocation # noqa
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'non_negative_factorization',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
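# Illustrative note (not part of the original module): the estimators exported
# above follow the standard scikit-learn fit/transform API, e.g.
#   from sklearn.decomposition import PCA
#   X_reduced = PCA(n_components=2).fit_transform(X)  # X: (n_samples, n_features)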
| 37.759259
| 79
| 0.673369
|
27e9935118cf633c5ae460e16296f55adb0b1439
| 285
|
py
|
Python
|
GUI/21 - keyboard events.py
|
PedroHAlvesS/Exercicios-Python-Curso-em-video
|
7ab187d65ddada252450b048608dc4898e5c67c8
|
[
"MIT"
] | null | null | null |
GUI/21 - keyboard events.py
|
PedroHAlvesS/Exercicios-Python-Curso-em-video
|
7ab187d65ddada252450b048608dc4898e5c67c8
|
[
"MIT"
] | null | null | null |
GUI/21 - keyboard events.py
|
PedroHAlvesS/Exercicios-Python-Curso-em-video
|
7ab187d65ddada252450b048608dc4898e5c67c8
|
[
"MIT"
] | null | null | null |
from tkinter import *
def aleatorio(event):
evento_label.config(text=event.keysym)
janela = Tk()
evento_label = Label(janela, text="aperte qualquer coisa",
font=("Helvetica", 100))
evento_label.pack()
janela.bind("<Key>", aleatorio)
janela.mainloop()
| 15
| 58
| 0.663158
|
db080d1741d35d2f18122f4a5df25bf505f93ce1
| 19,479
|
py
|
Python
|
panels/options.py
|
bennettdc/MCEdit-Unified
|
90abfb170c65b877ac67193e717fa3a3ded635dd
|
[
"0BSD"
] | 237
|
2018-02-04T19:13:31.000Z
|
2022-03-26T03:06:07.000Z
|
panels/options.py
|
bennettdc/MCEdit-Unified
|
90abfb170c65b877ac67193e717fa3a3ded635dd
|
[
"0BSD"
] | 551
|
2015-01-01T02:36:53.000Z
|
2018-02-01T00:03:12.000Z
|
panels/options.py
|
bennettdc/MCEdit-Unified
|
90abfb170c65b877ac67193e717fa3a3ded635dd
|
[
"0BSD"
] | 97
|
2015-01-02T01:31:12.000Z
|
2018-01-22T05:37:47.000Z
|
from __future__ import unicode_literals
import albow
from albow.dialogs import Dialog
from config import config
import pygame
from albow.translate import _, buildTemplate
import sys
import os
import logging
import traceback
import directories
old_lang = None
old_fprop = None
class OptionsPanel(Dialog):
anchor = 'wh'
def __init__(self, mcedit):
Dialog.__init__(self)
self.mcedit = mcedit
self.langs = {}
self.sgnal = {}
self.portableVar = albow.AttrRef(self, 'portableLabelText')
self.saveOldPortable = self.portableVar.get()
self.saveOldConfig = {
config.controls.autobrake: config.controls.autobrake.get(),
config.controls.swapAxes: config.controls.swapAxes.get(),
config.controls.cameraAccel: config.controls.cameraAccel.get(),
config.controls.cameraDrag: config.controls.cameraDrag.get(),
config.controls.cameraMaxSpeed: config.controls.cameraMaxSpeed.get(),
config.controls.cameraBrakingSpeed: config.controls.cameraBrakingSpeed.get(),
config.controls.mouseSpeed: config.controls.mouseSpeed.get(),
config.settings.undoLimit: config.settings.undoLimit.get(),
config.settings.maxCopies: config.settings.maxCopies.get(),
config.controls.invertMousePitch: config.controls.invertMousePitch.get(),
config.settings.spaceHeight: config.settings.spaceHeight.get(),
albow.AttrRef(self, 'blockBuffer'): albow.AttrRef(self, 'blockBuffer').get(),
config.settings.setWindowPlacement: config.settings.setWindowPlacement.get(),
config.settings.rotateBlockBrush: config.settings.rotateBlockBrush.get(),
config.settings.shouldResizeAlert: config.settings.shouldResizeAlert.get(),
config.settings.superSecretSettings: config.settings.superSecretSettings.get(),
config.settings.longDistanceMode: config.settings.longDistanceMode.get(),
config.settings.flyMode: config.settings.flyMode.get(),
config.settings.langCode: config.settings.langCode.get(),
config.settings.compassToggle: config.settings.compassToggle.get(),
config.settings.compassSize: config.settings.compassSize.get(),
config.settings.fontProportion: config.settings.fontProportion.get(),
config.settings.fogIntensity: config.settings.fogIntensity.get(),
config.schematicCopying.cancelCommandBlockOffset: config.schematicCopying.cancelCommandBlockOffset.get()
}
global old_lang
if old_lang == None:
old_lang = config.settings.langCode.get()
global old_fprop
if old_fprop == None:
old_fprop = config.settings.fontProportion.get()
def initComponents(self):
"""Initilize the window components. Call this after translation hs been loaded."""
autoBrakeRow = albow.CheckBoxLabel("Autobrake",
ref=config.controls.autobrake,
tooltipText="Apply brake when not pressing movement keys")
swapAxesRow = albow.CheckBoxLabel("Swap Axes Looking Down",
ref=config.controls.swapAxes,
tooltipText="Change the direction of the Forward and Backward keys when looking down")
cameraAccelRow = albow.FloatInputRow("Camera Acceleration: ",
ref=config.controls.cameraAccel, width=100, min=5.0)
cameraDragRow = albow.FloatInputRow("Camera Drag: ",
ref=config.controls.cameraDrag, width=100, min=1.0)
cameraMaxSpeedRow = albow.FloatInputRow("Camera Max Speed: ",
ref=config.controls.cameraMaxSpeed, width=100, min=1.0)
cameraBrakeSpeedRow = albow.FloatInputRow("Camera Braking Speed: ",
ref=config.controls.cameraBrakingSpeed, width=100,
min=1.0)
mouseSpeedRow = albow.FloatInputRow("Mouse Speed: ",
ref=config.controls.mouseSpeed, width=100, min=0.1,
max=20.0)
undoLimitRow = albow.IntInputRow("Undo Limit: ",
ref=config.settings.undoLimit, width=100, min=0)
maxCopiesRow = albow.IntInputRow("Copy Stack Size: ",
ref=config.settings.maxCopies, width=100, min=0,
tooltipText="Maximum number of copied objects.")
compassSizeRow = albow.IntInputRow("Compass Size (%): ",
ref=config.settings.compassSize, width=100, min=0, max=100)
fontProportion = albow.IntInputRow("Fonts Proportion (%): ",
ref=config.settings.fontProportion, width=100, min=0,
tooltipText="Fonts sizing proportion. The number is a percentage.\nRestart needed!")
albow.resource.font_proportion = config.settings.fontProportion.get()
fogIntensityRow = albow.IntInputRow("Fog Intensity (%): ",
ref=config.settings.fogIntensity, width=100, min=0, max=100)
invertRow = albow.CheckBoxLabel("Invert Mouse",
ref=config.controls.invertMousePitch,
tooltipText="Reverse the up and down motion of the mouse.")
spaceHeightRow = albow.IntInputRow("Low Detail Height",
ref=config.settings.spaceHeight,
tooltipText="When you are this far above the top of the world, move fast and use low-detail mode.")
blockBufferRow = albow.IntInputRow("Block Buffer (MB):",
ref=albow.AttrRef(self, 'blockBuffer'), min=1,
tooltipText="Amount of memory used for temporary storage. When more than this is needed, the disk is used instead.")
setWindowPlacementRow = albow.CheckBoxLabel("Set Window Placement",
ref=config.settings.setWindowPlacement,
tooltipText="Try to save and restore the window position.")
rotateBlockBrushRow = albow.CheckBoxLabel("Rotate block with brush",
ref=config.settings.rotateBlockBrush,
tooltipText="When rotating your brush, also rotate the orientation of the block your brushing with")
compassToggleRow =albow.CheckBoxLabel("Toggle compass",
ref=config.settings.compassToggle)
windowSizeRow = albow.CheckBoxLabel("Window Resize Alert",
ref=config.settings.shouldResizeAlert,
tooltipText="Reminds you that the cursor won't work correctly after resizing the window.")
superSecretSettingsRow = albow.CheckBoxLabel("Super Secret Settings",
ref=config.settings.superSecretSettings,
tooltipText="Weird stuff happen!")
longDistanceRow = albow.CheckBoxLabel("Long-Distance Mode",
ref=config.settings.longDistanceMode,
tooltipText="Always target the farthest block under the cursor, even in mouselook mode.")
flyModeRow = albow.CheckBoxLabel("Fly Mode",
ref=config.settings.flyMode,
tooltipText="Moving forward and Backward will not change your altitude in Fly Mode.")
showCommandsRow = albow.CheckBoxLabel("Show Block Info when hovering",
ref=config.settings.showQuickBlockInfo,
tooltipText="Shows summarized info of some Blocks when hovering over it.")
cancelCommandBlockOffset = albow.CheckBoxLabel("Cancel Command Block Offset",
ref=config.schematicCopying.cancelCommandBlockOffset,
tooltipText="Cancels the command blocks coords changed when copied.")
lng = config.settings.langCode.get()
langs = sorted(self.getLanguageChoices().items())
langNames = [k for k, v in langs]
self.languageButton = albow.ChoiceButton(langNames, choose=self.changeLanguage, doNotTranslate=True)
if self.sgnal[lng] in self.languageButton.choices:
self.languageButton.selectedChoice = self.sgnal[lng]
langButtonRow = albow.Row((albow.Label("Language", tooltipText="Choose your language."), self.languageButton))
portableList = ["Portable", "Fixed"]
self.goPortableButton = goPortableButton = albow.ChoiceButton(portableList, choose=self.togglePortable)
goPortableButton.selectedChoice = self.saveOldPortable
goPortableButton.tooltipText = self.portableButtonTooltip()
goPortableRow = albow.Row((albow.Label("Install Mode"), goPortableButton))
# Disabled Crash Reporting Option
# reportRow = albow.CheckBoxLabel("Report Errors",
# ref=config.settings.reportCrashes,
# tooltipText="Automatically report errors to the developer.")
self.inputs = (
spaceHeightRow,
cameraAccelRow,
cameraDragRow,
cameraMaxSpeedRow,
cameraBrakeSpeedRow,
blockBufferRow,
mouseSpeedRow,
undoLimitRow,
maxCopiesRow,
compassSizeRow,
fontProportion,
fogIntensityRow,
)
options = (
longDistanceRow,
flyModeRow,
autoBrakeRow,
swapAxesRow,
invertRow,
superSecretSettingsRow,
rotateBlockBrushRow,
compassToggleRow,
showCommandsRow,
cancelCommandBlockOffset,
langButtonRow,
) + (
((sys.platform == "win32" and pygame.version.vernum == (1, 9, 1)) and (windowSizeRow,) or ())
) + (
(sys.platform == "win32") and (setWindowPlacementRow,) or ()
) + (
(not sys.platform == "darwin") and (goPortableRow,) or ()
)
rightcol = albow.Column(options, align='r')
leftcol = albow.Column(self.inputs, align='r')
optionsColumn = albow.Column((albow.Label("Options"),
albow.Row((leftcol, rightcol), align="t")))
settingsRow = albow.Row((optionsColumn,))
buttonsRow = albow.Row((albow.Button("OK", action=self.dismiss), albow.Button("Cancel", action=self.cancel)))
resetToDefaultRow = albow.Row((albow.Button("Reset to default", action=self.resetDefault),))
optionsColumn = albow.Column((settingsRow, buttonsRow, resetToDefaultRow))
optionsColumn.key_down = self.key_down
self.add(optionsColumn)
self.shrink_wrap()
@property
def blockBuffer(self):
return config.settings.blockBuffer.get() / 1048576
@blockBuffer.setter
def blockBuffer(self, val):
config.settings.blockBuffer.set(int(val * 1048576))
def getLanguageChoices(self, current=None):
files = os.listdir(albow.translate.langPath)
langs = {}
sgnal = {}
for file in files:
name, ext = os.path.splitext(file)
if ext == ".trn" and len(name) == 5 and name[2] == "_":
langName = albow.translate.getLangName(file)
langs[langName] = name
sgnal[name] = langName
if "English (US)" not in langs.keys():
langs[u"English (US)"] = "en_US"
sgnal["en_US"] = u"English (US)"
self.langs = langs
self.sgnal = sgnal
logging.debug("Detected languages: %s"%self.langs)
return langs
def changeLanguage(self):
if albow.translate.buildTemplate:
self.languageButton.selectedChoice = 'English (US)'
return
langName = self.languageButton.selectedChoice
if langName not in self.langs:
lng = "en_US"
else:
lng = self.langs[langName]
config.settings.langCode.set(lng)
#-# Translation live update preparation
logging.debug('*** Language change detected.')
logging.debug(' Former language: %s.'%albow.translate.getLang())
logging.debug(' New language: %s.'%lng)
#albow.translate.langPath = os.sep.join((directories.getDataDir(), "lang"))
albow.translate.langPath = directories.getDataFile('lang')
update = albow.translate.setLang(lng)[2]
logging.debug(' Update done? %s (Magic %s)'%(update, update or lng == 'en_US'))
self.mcedit.root.set_update_ui(update or lng == 'en_US')
self.mcedit.root.set_update_ui(False)
self.mcedit.editor.set_update_ui(update or lng == 'en_US')
self.mcedit.editor.set_update_ui(False)
#-#
@staticmethod
def portableButtonTooltip():
return (
"Click to make your MCEdit install self-contained by moving the settings and schematics into the program folder",
"Click to make your MCEdit install persistent by moving the settings and schematics into your Documents folder")[
directories.portable]
@property
def portableLabelText(self):
return ("Portable", "Fixed")[1 - directories.portable]
@portableLabelText.setter
def portableLabelText(self, *args, **kwargs):
pass
def togglePortable(self):
if sys.platform == "darwin":
return False
textChoices = [
_("This will make your MCEdit \"portable\" by moving your settings and schematics into the same folder as {0}. Continue?").format(
(sys.platform == "darwin" and _("the MCEdit application") or _("MCEditData"))),
_("This will move your settings and schematics to your Documents folder. Continue?"),
]
useExisting = False
alertText = textChoices[directories.portable]
if albow.ask(alertText) == "OK":
if [directories.hasPreviousPortableInstallation, directories.hasPreviousFixedInstallation][directories.portable]():
asked = albow.ask("Found a previous %s installation"%["portable", "fixed"][directories.portable], responses=["Use", "Overwrite", "Cancel"])
if asked == "Use":
useExisting = True
elif asked == "Overwrite":
useExisting = False
elif asked == "Cancel":
return False
try:
[directories.goPortable, directories.goFixed][directories.portable](useExisting)
except Exception as e:
traceback.print_exc()
albow.alert(_(u"Error while moving files: {0}").format(repr(e)))
else:
self.goPortableButton.selectedChoice = self.saveOldPortable
self.goPortableButton.tooltipText = self.portableButtonTooltip()
return True
def dismiss(self, *args, **kwargs):
"""Used to change the font proportion."""
# If font proportion setting has changed, update the UI.
if config.settings.fontProportion.get() != self.saveOldConfig[config.settings.fontProportion]:
albow.resource.reload_fonts(proportion=config.settings.fontProportion.get())
self.mcedit.root.set_update_ui(True)
self.mcedit.root.set_update_ui(False)
self.mcedit.editor.set_update_ui(True)
self.mcedit.editor.set_update_ui(False)
self.reshowNumberFields()
for key in self.saveOldConfig.keys():
self.saveOldConfig[key] = key.get()
config.save()
Dialog.dismiss(self, *args, **kwargs)
def cancel(self, *args, **kwargs):
Changes = False
self.reshowNumberFields()
for key in self.saveOldConfig.keys():
if key.get() != self.saveOldConfig[key]:
Changes = True
oldLanguage = self.saveOldConfig[config.settings.langCode]
if config.settings.langCode.get() != oldLanguage:
Changes = True
newPortable = self.portableVar.get()
if newPortable != self.saveOldPortable:
Changes = True
if not Changes:
Dialog.dismiss(self, *args, **kwargs)
return
result = albow.ask("Do you want to save your changes?", ["Save", "Don't Save", "Cancel"])
if result == "Cancel":
return
if result == "Save":
self.dismiss(*args, **kwargs)
return
if config.settings.langCode.get() != oldLanguage:
self.languageButton.selectedChoice = self.sgnal[oldLanguage]
self.changeLanguage()
if _(newPortable) != _(self.saveOldPortable):
self.portableVar.set(newPortable)
self.togglePortable()
for key in self.saveOldConfig.keys():
key.set(self.saveOldConfig[key])
config.save()
Dialog.dismiss(self, *args, **kwargs)
def resetDefault(self):
self.reshowNumberFields()
for key in self.saveOldConfig.keys():
if "AttrRef" in str(key):
key.set(config.settings.blockBuffer.default / 1048576)
elif "lang" not in str(key):
key.set(key.default)
if config.settings.langCode.get() != "en_US":
config.settings.langCode.set("en_US")
self.changeLanguage()
if "Fixed" != self.portableVar.get():
self.portableVar.set("Fixed")
self.togglePortable()
config.save()
def reshowNumberFields(self):
for key in self.inputs:
key.subwidgets[1].editing = False
def dispatch_key(self, name, evt):
super(OptionsPanel, self).dispatch_key(name, evt)
if name == "key_down":
keyname = self.get_root().getKey(evt)
if keyname == 'Escape':
self.cancel()
| 47.742647
| 163
| 0.565378
|
ba71228db39bb8282dd7b52a781c7a844d53e9b1
| 61,527
|
py
|
Python
|
salt/modules/win_iis.py
|
ipmb/salt
|
699912ef9cde28040378aa53d6c7a12d8af756b1
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/win_iis.py
|
ipmb/salt
|
699912ef9cde28040378aa53d6c7a12d8af756b1
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/win_iis.py
|
ipmb/salt
|
699912ef9cde28040378aa53d6c7a12d8af756b1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Microsoft IIS site management via WebAdministration powershell module
:maintainer: Shane Lee <slee@saltstack.com>, Robert Booth <rbooth@saltstack.com>
:platform: Windows
:depends: PowerShell
:depends: WebAdministration module (PowerShell) (IIS)
.. versionadded:: 2016.3.0
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import decimal
import logging
import os
# Import salt libs
import salt.utils.json
import salt.utils.platform
from salt.ext.six.moves import range
from salt.exceptions import SaltInvocationError, CommandExecutionError
from salt.ext import six
log = logging.getLogger(__name__)
_DEFAULT_APP = '/'
_VALID_PROTOCOLS = ('ftp', 'http', 'https')
_VALID_SSL_FLAGS = tuple(range(0, 4))
# Define the module's virtual name
__virtualname__ = 'win_iis'
def __virtual__():
'''
Load only on Windows
Requires PowerShell and the WebAdministration module
'''
if not salt.utils.platform.is_windows():
return False, 'Only available on Windows systems'
powershell_info = __salt__['cmd.shell_info']('powershell', True)
if not powershell_info['installed']:
return False, 'PowerShell not available'
if 'WebAdministration' not in powershell_info['modules']:
return False, 'IIS is not installed'
return __virtualname__
def _get_binding_info(host_header='', ip_address='*', port=80):
'''
Combine the host header, IP address, and TCP port into bindingInformation
format. Binding Information specifies information to communicate with a
site. It includes the IP address, the port number, and an optional host
header (usually a host name) to communicate with the site.
Args:
host_header (str): Usually a hostname
ip_address (str): The IP address
port (int): The port
Returns:
str: A properly formatted bindingInformation string (IP:port:hostheader)
eg: 192.168.0.12:80:www.contoso.com
'''
return r':'.join([ip_address, str(port), host_header.replace(' ', '')])
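# Illustrative example (not part of the original module): with the docstring's
# sample values the join above yields the documented format, e.g.
#   _get_binding_info('www.contoso.com', '192.168.0.12', 80)
#   # -> '192.168.0.12:80:www.contoso.com'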
def _list_certs(certificate_store='My'):
'''
List details of available certificates in the LocalMachine certificate
store.
Args:
certificate_store (str): The name of the certificate store on the local
machine.
Returns:
dict: A dictionary of certificates found in the store
'''
ret = dict()
blacklist_keys = ['DnsNameList', 'Thumbprint']
ps_cmd = ['Get-ChildItem',
'-Path', r"'Cert:\LocalMachine\{0}'".format(certificate_store),
'|',
'Select-Object DnsNameList, SerialNumber, Subject, Thumbprint, Version']
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
for item in items:
cert_info = dict()
for key in item:
if key not in blacklist_keys:
cert_info[key.lower()] = item[key]
cert_info['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']]
ret[item['Thumbprint']] = cert_info
return ret
def _iisVersion():
pscmd = []
pscmd.append(r"Get-ItemProperty HKLM:\\SOFTWARE\\Microsoft\\InetStp\\")
pscmd.append(' | Select-Object MajorVersion, MinorVersion')
cmd_ret = _srvmgr(pscmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
log.error('Unable to parse return data as Json.')
return -1
return decimal.Decimal("{0}.{1}".format(items[0]['MajorVersion'], items[0]['MinorVersion']))
def _srvmgr(cmd, return_json=False):
'''
Execute a powershell command from the WebAdministration PS module.
Args:
cmd (list): The command to execute in a list
return_json (bool): True formats the return in JSON, False just returns
the output of the command.
Returns:
str: The output from the command
'''
if isinstance(cmd, list):
cmd = ' '.join(cmd)
if return_json:
cmd = 'ConvertTo-Json -Compress -Depth 4 -InputObject @({0})' \
''.format(cmd)
cmd = 'Import-Module WebAdministration; {0}'.format(cmd)
ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True)
if ret['retcode'] != 0:
msg = 'Unable to execute command: {0}\nError: {1}' \
''.format(cmd, ret['stderr'])
log.error(msg)
return ret
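# Illustrative example (not part of the original module): given the wrapping
# above, _srvmgr(['Get-Website'], return_json=True) runs the PowerShell command
#   Import-Module WebAdministration; ConvertTo-Json -Compress -Depth 4 -InputObject @(Get-Website)
# through cmd.run_all with shell='powershell' and returns its result dict.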
def list_sites():
'''
List all the currently deployed websites.
Returns:
dict: A dictionary of the IIS sites and their properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_sites
'''
ret = dict()
ps_cmd = ['Get-ChildItem',
'-Path', r"'IIS:\Sites'",
'|',
'Select-Object applicationPool, Bindings, ID, Name, PhysicalPath, State']
keep_keys = ('certificateHash', 'certificateStoreName', 'protocol', 'sslFlags')
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
for item in items:
bindings = dict()
for binding in item['bindings']['Collection']:
# Ignore bindings which do not have host names
if binding['protocol'] not in ['http', 'https']:
continue
filtered_binding = dict()
for key in binding:
if key in keep_keys:
filtered_binding.update({key.lower(): binding[key]})
binding_info = binding['bindingInformation'].split(':', 2)
ipaddress, port, hostheader = [element.strip() for element in binding_info]
filtered_binding.update({'hostheader': hostheader,
'ipaddress': ipaddress,
'port': port})
bindings[binding['bindingInformation']] = filtered_binding
ret[item['name']] = {'apppool': item['applicationPool'],
'bindings': bindings,
'id': item['id'],
'state': item['state'],
'sourcepath': item['physicalPath']}
if not ret:
log.warning('No sites found in output: {0}'.format(cmd_ret['stdout']))
return ret
def create_site(name, sourcepath, apppool='', hostheader='',
ipaddress='*', port=80, protocol='http'):
'''
Create a basic website in IIS.
.. note::
This function only validates against the site name, and will return True
even if the site already exists with a different configuration. It will
not modify the configuration of an existing site.
Args:
name (str): The IIS site name.
sourcepath (str): The physical path of the IIS site.
apppool (str): The name of the IIS application pool.
hostheader (str): The host header of the binding. Usually the hostname
or website name, ie: www.contoso.com
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
protocol (str): The application protocol of the binding. (http, https,
etc.)
Returns:
bool: True if successful, otherwise False.
.. note::
If an application pool is specified, and that application pool does not
already exist, it will be created.
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_site name='My Test Site' sourcepath='c:\\stage' apppool='TestPool'
'''
protocol = str(protocol).lower()
site_path = r'IIS:\Sites\{0}'.format(name)
binding_info = _get_binding_info(hostheader, ipaddress, port)
current_sites = list_sites()
if name in current_sites:
log.debug("Site '{0}' already present.".format(name))
return True
if protocol not in _VALID_PROTOCOLS:
message = ("Invalid protocol '{0}' specified. Valid formats:"
' {1}').format(protocol, _VALID_PROTOCOLS)
raise SaltInvocationError(message)
ps_cmd = ['New-Item',
'-Path', r"'{0}'".format(site_path),
'-PhysicalPath', r"'{0}'".format(sourcepath),
'-Bindings', "@{{ protocol='{0}'; bindingInformation='{1}' }};"
"".format(protocol, binding_info)]
if apppool:
if apppool in list_apppools():
log.debug('Utilizing pre-existing application pool: {0}'
''.format(apppool))
else:
log.debug('Application pool will be created: {0}'.format(apppool))
create_apppool(apppool)
ps_cmd.extend(['Set-ItemProperty',
'-Path', "'{0}'".format(site_path),
'-Name', 'ApplicationPool',
'-Value', "'{0}'".format(apppool)])
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to create site: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
log.debug('Site created successfully: {0}'.format(name))
return True
def modify_site(name, sourcepath=None, apppool=None):
'''
Modify a basic website in IIS.
.. versionadded:: 2017.7.0
Args:
name (str): The IIS site name.
sourcepath (str): The physical path of the IIS site.
apppool (str): The name of the IIS application pool.
Returns:
bool: True if successful, otherwise False.
.. note::
If an application pool is specified, and that application pool does not
already exist, it will be created.
CLI Example:
.. code-block:: bash
salt '*' win_iis.modify_site name='My Test Site' sourcepath='c:\\new_path' apppool='NewTestPool'
'''
site_path = r'IIS:\Sites\{0}'.format(name)
current_sites = list_sites()
if name not in current_sites:
log.debug("Site '{0}' not defined.".format(name))
return False
ps_cmd = list()
if sourcepath:
ps_cmd.extend(['Set-ItemProperty',
'-Path', r"'{0}'".format(site_path),
'-Name', 'PhysicalPath',
'-Value', r"'{0}'".format(sourcepath)])
if apppool:
if apppool in list_apppools():
log.debug('Utilizing pre-existing application pool: {0}'
''.format(apppool))
else:
log.debug('Application pool will be created: {0}'.format(apppool))
create_apppool(apppool)
# If ps_cmd isn't empty, we need to add a semi-colon to run two commands
if ps_cmd:
ps_cmd.append(';')
ps_cmd.extend(['Set-ItemProperty',
'-Path', r"'{0}'".format(site_path),
'-Name', 'ApplicationPool',
'-Value', r"'{0}'".format(apppool)])
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to modify site: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
log.debug('Site modified successfully: {0}'.format(name))
return True
def remove_site(name):
'''
Delete a website from IIS.
Args:
name (str): The IIS site name.
Returns:
bool: True if successful, otherwise False
.. note::
This will not remove the application pool used by the site.
CLI Example:
.. code-block:: bash
salt '*' win_iis.remove_site name='My Test Site'
'''
current_sites = list_sites()
if name not in current_sites:
log.debug('Site already absent: {0}'.format(name))
return True
ps_cmd = ['Remove-WebSite', '-Name', r"'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to remove site: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
log.debug('Site removed successfully: {0}'.format(name))
return True
def stop_site(name):
'''
Stop a Web Site in IIS.
.. versionadded:: 2017.7.0
Args:
name (str): The name of the website to stop.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.stop_site name='My Test Site'
'''
ps_cmd = ['Stop-WebSite', r"'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
return cmd_ret['retcode'] == 0
def start_site(name):
'''
Start a Web Site in IIS.
.. versionadded:: 2017.7.0
Args:
name (str): The name of the website to start.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.start_site name='My Test Site'
'''
ps_cmd = ['Start-WebSite', r"'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
return cmd_ret['retcode'] == 0
def restart_site(name):
'''
Restart a Web Site in IIS.
.. versionadded:: 2017.7.0
Args:
name (str): The name of the website to restart.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.restart_site name='My Test Site'
'''
return stop_site(name) and start_site(name)
def list_bindings(site):
'''
Get all configured IIS bindings for the specified site.
Args:
        site (str): The name of the IIS site
Returns:
dict: A dictionary of the binding names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_bindings site
'''
ret = dict()
sites = list_sites()
if site not in sites:
log.warning('Site not found: {0}'.format(site))
return ret
ret = sites[site]['bindings']
if not ret:
log.warning('No bindings found for site: {0}'.format(site))
return ret
def create_binding(site, hostheader='', ipaddress='*', port=80, protocol='http',
sslflags=None):
'''
Create an IIS Web Binding.
.. note::
This function only validates against the binding
ipaddress:port:hostheader combination, and will return True even if the
binding already exists with a different configuration. It will not
modify the configuration of an existing binding.
Args:
site (str): The IIS site name.
hostheader (str): The host header of the binding. Usually a hostname.
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
protocol (str): The application protocol of the binding.
sslflags (str): The flags representing certificate type and storage of
the binding.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_binding site='site0' hostheader='example.com' ipaddress='*' port='80'
'''
protocol = str(protocol).lower()
name = _get_binding_info(hostheader, ipaddress, port)
if protocol not in _VALID_PROTOCOLS:
message = ("Invalid protocol '{0}' specified. Valid formats:"
' {1}').format(protocol, _VALID_PROTOCOLS)
raise SaltInvocationError(message)
if sslflags:
sslflags = int(sslflags)
if sslflags not in _VALID_SSL_FLAGS:
message = ("Invalid sslflags '{0}' specified. Valid sslflags range:"
' {1}..{2}').format(sslflags, _VALID_SSL_FLAGS[0], _VALID_SSL_FLAGS[-1])
raise SaltInvocationError(message)
current_bindings = list_bindings(site)
if name in current_bindings:
log.debug('Binding already present: {0}'.format(name))
return True
if sslflags:
ps_cmd = ['New-WebBinding',
'-Name', "'{0}'".format(site),
'-HostHeader', "'{0}'".format(hostheader),
'-IpAddress', "'{0}'".format(ipaddress),
'-Port', "'{0}'".format(str(port)),
'-Protocol', "'{0}'".format(protocol),
'-SslFlags', '{0}'.format(sslflags)]
else:
ps_cmd = ['New-WebBinding',
'-Name', "'{0}'".format(site),
'-HostHeader', "'{0}'".format(hostheader),
'-IpAddress', "'{0}'".format(ipaddress),
'-Port', "'{0}'".format(str(port)),
'-Protocol', "'{0}'".format(protocol)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to create binding: {0}\nError: {1}' \
''.format(site, cmd_ret['stderr'])
raise CommandExecutionError(msg)
if name in list_bindings(site):
log.debug('Binding created successfully: {0}'.format(site))
return True
log.error('Unable to create binding: {0}'.format(site))
return False
def modify_binding(site, binding, hostheader=None, ipaddress=None, port=None,
sslflags=None):
'''
Modify an IIS Web Binding. Use ``site`` and ``binding`` to target the
binding.
.. versionadded:: 2017.7.0
Args:
site (str): The IIS site name.
binding (str): The binding to edit. This is a combination of the
IP address, port, and hostheader. It is in the following format:
ipaddress:port:hostheader. For example, ``*:80:`` or
``*:80:salt.com``
hostheader (str): The host header of the binding. Usually the hostname.
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
sslflags (str): The flags representing certificate type and storage of
the binding.
Returns:
bool: True if successful, otherwise False
CLI Example:
    The following will set the host header of binding ``*:80:`` for ``site0``
to ``example.com``
.. code-block:: bash
salt '*' win_iis.modify_binding site='site0' binding='*:80:' hostheader='example.com'
'''
if sslflags is not None and sslflags not in _VALID_SSL_FLAGS:
message = ("Invalid sslflags '{0}' specified. Valid sslflags range:"
' {1}..{2}').format(sslflags, _VALID_SSL_FLAGS[0], _VALID_SSL_FLAGS[-1])
raise SaltInvocationError(message)
current_sites = list_sites()
if site not in current_sites:
log.debug("Site '{0}' not defined.".format(site))
return False
current_bindings = list_bindings(site)
if binding not in current_bindings:
log.debug("Binding '{0}' not defined.".format(binding))
return False
# Split out the binding so we can insert new ones
# Use the existing value if not passed
i, p, h = binding.split(':')
new_binding = ':'.join([ipaddress if ipaddress is not None else i,
str(port) if port is not None else str(p),
hostheader if hostheader is not None else h])
if new_binding != binding:
ps_cmd = ['Set-WebBinding',
'-Name', "'{0}'".format(site),
'-BindingInformation', "'{0}'".format(binding),
'-PropertyName', 'BindingInformation',
'-Value', "'{0}'".format(new_binding)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to modify binding: {0}\nError: {1}' \
''.format(binding, cmd_ret['stderr'])
raise CommandExecutionError(msg)
if sslflags is not None and \
sslflags != current_sites[site]['bindings'][binding]['sslflags']:
ps_cmd = ['Set-WebBinding',
'-Name', "'{0}'".format(site),
'-BindingInformation', "'{0}'".format(new_binding),
'-PropertyName', 'sslflags',
'-Value', "'{0}'".format(sslflags)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to modify binding SSL Flags: {0}\nError: {1}' \
''.format(sslflags, cmd_ret['stderr'])
raise CommandExecutionError(msg)
log.debug('Binding modified successfully: {0}'.format(binding))
return True
def remove_binding(site, hostheader='', ipaddress='*', port=80):
'''
Remove an IIS binding.
Args:
site (str): The IIS site name.
hostheader (str): The host header of the binding.
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.remove_binding site='site0' hostheader='example.com' ipaddress='*' port='80'
'''
name = _get_binding_info(hostheader, ipaddress, port)
current_bindings = list_bindings(site)
if name not in current_bindings:
log.debug('Binding already absent: {0}'.format(name))
return True
ps_cmd = ['Remove-WebBinding',
'-HostHeader', "'{0}'".format(hostheader),
'-IpAddress', "'{0}'".format(ipaddress),
'-Port', "'{0}'".format(port)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to remove binding: {0}\nError: {1}' \
''.format(site, cmd_ret['stderr'])
raise CommandExecutionError(msg)
if name not in list_bindings(site):
log.debug('Binding removed successfully: {0}'.format(site))
return True
log.error('Unable to remove binding: {0}'.format(site))
return False
def list_cert_bindings(site):
'''
List certificate bindings for an IIS site.
.. versionadded:: 2016.11.0
Args:
site (str): The IIS site name.
Returns:
dict: A dictionary of the binding names and properties.
CLI Example:
.. code-block:: bash
        salt '*' win_iis.list_cert_bindings site
'''
ret = dict()
sites = list_sites()
if site not in sites:
log.warning('Site not found: {0}'.format(site))
return ret
for binding in sites[site]['bindings']:
if sites[site]['bindings'][binding]['certificatehash']:
ret[binding] = sites[site]['bindings'][binding]
if not ret:
log.warning('No certificate bindings found for site: {0}'.format(site))
return ret
def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443,
sslflags=0):
'''
Assign a certificate to an IIS Web Binding.
.. versionadded:: 2016.11.0
.. note::
The web binding that the certificate is being assigned to must already
exist.
Args:
name (str): The thumbprint of the certificate.
site (str): The IIS site name.
hostheader (str): The host header of the binding.
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
sslflags (int): Flags representing certificate type and certificate storage of the binding.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_cert_binding name='AAA000' site='site0' hostheader='example.com' ipaddress='*' port='443'
'''
name = str(name).upper()
binding_info = _get_binding_info(hostheader, ipaddress, port)
if _iisVersion() < 8:
# IIS 7.5 and earlier don't support SNI for HTTPS, therefore cert bindings don't contain the host header
binding_info = binding_info.rpartition(':')[0] + ':'
binding_path = r"IIS:\SslBindings\{0}".format(binding_info.replace(':', '!'))
if sslflags not in _VALID_SSL_FLAGS:
message = ("Invalid sslflags '{0}' specified. Valid sslflags range: "
"{1}..{2}").format(sslflags, _VALID_SSL_FLAGS[0],
_VALID_SSL_FLAGS[-1])
raise SaltInvocationError(message)
# Verify that the target binding exists.
current_bindings = list_bindings(site)
if binding_info not in current_bindings:
log.error('Binding not present: {0}'.format(binding_info))
return False
# Check to see if the certificate is already assigned.
current_name = None
for current_binding in current_bindings:
if binding_info == current_binding:
current_name = current_bindings[current_binding]['certificatehash']
log.debug('Current certificate thumbprint: {0}'.format(current_name))
log.debug('New certificate thumbprint: {0}'.format(name))
if name == current_name:
log.debug('Certificate already present for binding: {0}'.format(name))
return True
# Verify that the certificate exists.
certs = _list_certs()
if name not in certs:
log.error('Certificate not present: {0}'.format(name))
return False
if _iisVersion() < 8:
# IIS 7.5 and earlier have different syntax for associating a certificate with a site
# Modify IP spec to IIS 7.5 format
iis7path = binding_path.replace(r"\*!", "\\0.0.0.0!")
# win 2008 uses the following format: ip!port and not ip!port!
if iis7path.endswith("!"):
iis7path = iis7path[:-1]
ps_cmd = ['New-Item',
'-Path', "'{0}'".format(iis7path),
'-Thumbprint', "'{0}'".format(name)]
else:
ps_cmd = ['New-Item',
'-Path', "'{0}'".format(binding_path),
'-Thumbprint', "'{0}'".format(name),
'-SSLFlags', '{0}'.format(sslflags)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to create certificate binding: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
new_cert_bindings = list_cert_bindings(site)
if binding_info not in new_cert_bindings:
log.error('Binding not present: {0}'.format(binding_info))
return False
if name == new_cert_bindings[binding_info]['certificatehash']:
log.debug('Certificate binding created successfully: {0}'.format(name))
return True
log.error('Unable to create certificate binding: {0}'.format(name))
return False
def remove_cert_binding(name, site, hostheader='', ipaddress='*', port=443):
'''
Remove a certificate from an IIS Web Binding.
.. versionadded:: 2016.11.0
.. note::
This function only removes the certificate from the web binding. It does
not remove the web binding itself.
Args:
name (str): The thumbprint of the certificate.
site (str): The IIS site name.
hostheader (str): The host header of the binding.
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.remove_cert_binding name='AAA000' site='site0' hostheader='example.com' ipaddress='*' port='443'
'''
name = str(name).upper()
binding_info = _get_binding_info(hostheader, ipaddress, port)
# Child items of IIS:\SslBindings do not return populated host header info
# in all circumstances, so it's necessary to use IIS:\Sites instead.
ps_cmd = ['$Site = Get-ChildItem', '-Path', r"'IIS:\Sites'",
'|', 'Where-Object', r" {{ $_.Name -Eq '{0}' }};".format(site),
'$Binding = $Site.Bindings.Collection',
r"| Where-Object { $_.bindingInformation",
r"-Eq '{0}' }};".format(binding_info),
'$Binding.RemoveSslCertificate()']
# Verify that the binding exists for the site, and that the target
# certificate is assigned to the binding.
current_cert_bindings = list_cert_bindings(site)
if binding_info not in current_cert_bindings:
log.warning('Binding not found: {0}'.format(binding_info))
return True
if name != current_cert_bindings[binding_info]['certificatehash']:
log.debug('Certificate binding already absent: {0}'.format(name))
return True
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to remove certificate binding: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
new_cert_bindings = list_cert_bindings(site)
if binding_info not in new_cert_bindings:
log.warning('Binding not found: {0}'.format(binding_info))
return True
if name != new_cert_bindings[binding_info]['certificatehash']:
log.debug('Certificate binding removed successfully: {0}'.format(name))
return True
log.error('Unable to remove certificate binding: {0}'.format(name))
return False
def list_apppools():
'''
List all configured IIS application pools.
Returns:
dict: A dictionary of IIS application pools and their details.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_apppools
'''
ret = dict()
ps_cmd = []
ps_cmd.append(r"Get-ChildItem -Path 'IIS:\AppPools' | Select-Object Name, State")
    # Include the equivalent of output from the Applications column. Since this
    # isn't a normal property, we have to populate it via filtered output from
    # the Get-WebConfigurationProperty cmdlet.
ps_cmd.append(r", @{ Name = 'Applications'; Expression = { $AppPool = $_.Name;")
ps_cmd.append("$AppPath = 'machine/webroot/apphost';")
ps_cmd.append("$FilterBase = '/system.applicationHost/sites/site/application';")
ps_cmd.append('$FilterBase += "[@applicationPool = \'$($AppPool)\' and @path";')
ps_cmd.append('$FilterRoot = "$($FilterBase) = \'/\']/parent::*";')
ps_cmd.append('$FilterNonRoot = "$($FilterBase) != \'/\']";')
ps_cmd.append('Get-WebConfigurationProperty -Filter $FilterRoot -PsPath $AppPath -Name Name')
ps_cmd.append(r'| ForEach-Object { $_.Value };')
ps_cmd.append('Get-WebConfigurationProperty -Filter $FilterNonRoot -PsPath $AppPath -Name Path')
ps_cmd.append(r"| ForEach-Object { $_.Value } | Where-Object { $_ -ne '/' }")
ps_cmd.append('} }')
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
for item in items:
applications = list()
# If there are no associated apps, Applications will be an empty dict,
# if there is one app, it will be a string, and if there are multiple,
# it will be a dict with 'Count' and 'value' as the keys.
if isinstance(item['Applications'], dict):
if 'value' in item['Applications']:
applications += item['Applications']['value']
else:
applications.append(item['Applications'])
ret[item['name']] = {'state': item['state'], 'applications': applications}
if not ret:
log.warning('No application pools found in output: {0}'
''.format(cmd_ret['stdout']))
return ret
def create_apppool(name):
'''
Create an IIS application pool.
.. note::
This function only validates against the application pool name, and will
return True even if the application pool already exists with a different
configuration. It will not modify the configuration of an existing
application pool.
Args:
name (str): The name of the IIS application pool.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_apppool name='MyTestPool'
'''
current_apppools = list_apppools()
apppool_path = r'IIS:\AppPools\{0}'.format(name)
if name in current_apppools:
log.debug("Application pool '{0}' already present.".format(name))
return True
ps_cmd = ['New-Item', '-Path', r"'{0}'".format(apppool_path)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to create application pool: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
log.debug('Application pool created successfully: {0}'.format(name))
return True
def remove_apppool(name):
'''
Remove an IIS application pool.
Args:
name (str): The name of the IIS application pool.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.remove_apppool name='MyTestPool'
'''
current_apppools = list_apppools()
apppool_path = r'IIS:\AppPools\{0}'.format(name)
if name not in current_apppools:
log.debug('Application pool already absent: {0}'.format(name))
return True
ps_cmd = ['Remove-Item', '-Path', r"'{0}'".format(apppool_path), '-Recurse']
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to remove application pool: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
log.debug('Application pool removed successfully: {0}'.format(name))
return True
def stop_apppool(name):
'''
Stop an IIS application pool.
.. versionadded:: 2017.7.0
Args:
name (str): The name of the App Pool to stop.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.stop_apppool name='MyTestPool'
'''
ps_cmd = ['Stop-WebAppPool', r"'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
return cmd_ret['retcode'] == 0
def start_apppool(name):
'''
Start an IIS application pool.
.. versionadded:: 2017.7.0
Args:
name (str): The name of the App Pool to start.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.start_apppool name='MyTestPool'
'''
ps_cmd = ['Start-WebAppPool', r"'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
return cmd_ret['retcode'] == 0
def restart_apppool(name):
'''
Restart an IIS application pool.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS application pool.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.restart_apppool name='MyTestPool'
'''
ps_cmd = ['Restart-WebAppPool', r"'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
return cmd_ret['retcode'] == 0
def get_container_setting(name, container, settings):
'''
Get the value of the setting for the IIS container.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS container.
container (str): The type of IIS container. The container types are:
AppPools, Sites, SslBindings
        settings (list): A list of the setting names whose values should be returned.
Returns:
dict: A dictionary of the provided settings and their values.
CLI Example:
.. code-block:: bash
salt '*' win_iis.get_container_setting name='MyTestPool' container='AppPools'
settings="['processModel.identityType']"
'''
ret = dict()
ps_cmd = list()
ps_cmd_validate = list()
container_path = r"IIS:\{0}\{1}".format(container, name)
if not settings:
log.warning('No settings provided')
return ret
ps_cmd.append(r'$Settings = @{};')
for setting in settings:
# Build the commands to verify that the property names are valid.
ps_cmd_validate.extend(['Get-ItemProperty',
'-Path', "'{0}'".format(container_path),
'-Name', "'{0}'".format(setting),
'-ErrorAction', 'Stop',
'|', 'Out-Null;'])
# Some ItemProperties are Strings and others are ConfigurationAttributes.
# Since the former doesn't have a Value property, we need to account
# for this.
ps_cmd.append("$Property = Get-ItemProperty -Path '{0}'".format(container_path))
ps_cmd.append("-Name '{0}' -ErrorAction Stop;".format(setting))
ps_cmd.append(r'if (([String]::IsNullOrEmpty($Property) -eq $False) -and')
ps_cmd.append(r"($Property.GetType()).Name -eq 'ConfigurationAttribute') {")
ps_cmd.append(r'$Property = $Property | Select-Object')
ps_cmd.append(r'-ExpandProperty Value };')
ps_cmd.append("$Settings['{0}'] = [String] $Property;".format(setting))
ps_cmd.append(r'$Property = $Null;')
# Validate the setting names that were passed in.
cmd_ret = _srvmgr(cmd=ps_cmd_validate, return_json=True)
if cmd_ret['retcode'] != 0:
message = 'One or more invalid property names were specified for the provided container.'
raise SaltInvocationError(message)
ps_cmd.append('$Settings')
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
if isinstance(items, list):
ret.update(items[0])
else:
ret.update(items)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
return ret
def set_container_setting(name, container, settings):
'''
Set the value of the setting for an IIS container.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS container.
container (str): The type of IIS container. The container types are:
AppPools, Sites, SslBindings
settings (dict): A dictionary of the setting names and their values.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.set_container_setting name='MyTestPool' container='AppPools'
settings="{'managedPipeLineMode': 'Integrated'}"
'''
identityType_map2string = {'0': 'LocalSystem', '1': 'LocalService', '2': 'NetworkService', '3': 'SpecificUser', '4': 'ApplicationPoolIdentity'}
identityType_map2numeric = {'LocalSystem': '0', 'LocalService': '1', 'NetworkService': '2', 'SpecificUser': '3', 'ApplicationPoolIdentity': '4'}
ps_cmd = list()
container_path = r"IIS:\{0}\{1}".format(container, name)
if not settings:
log.warning('No settings provided')
return False
# Treat all values as strings for the purpose of comparing them to existing values.
for setting in settings:
settings[setting] = str(settings[setting])
current_settings = get_container_setting(
name=name, container=container, settings=settings.keys())
if settings == current_settings:
log.debug('Settings already contain the provided values.')
return True
for setting in settings:
# If the value is numeric, don't treat it as a string in PowerShell.
try:
complex(settings[setting])
value = settings[setting]
except ValueError:
value = "'{0}'".format(settings[setting])
# Map to numeric to support server 2008
if setting == 'processModel.identityType' and settings[setting] in identityType_map2numeric.keys():
value = identityType_map2numeric[settings[setting]]
ps_cmd.extend(['Set-ItemProperty',
'-Path', "'{0}'".format(container_path),
'-Name', "'{0}'".format(setting),
'-Value', '{0};'.format(value)])
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to set settings for {0}: {1}'.format(container, name)
raise CommandExecutionError(msg)
    # Get the fields post-change so that we can verify that all values
# were modified successfully. Track the ones that weren't.
new_settings = get_container_setting(
name=name, container=container, settings=settings.keys())
failed_settings = dict()
for setting in settings:
# map identity type from numeric to string for comparing
if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys():
settings[setting] = identityType_map2string[settings[setting]]
if str(settings[setting]) != str(new_settings[setting]):
failed_settings[setting] = settings[setting]
if failed_settings:
log.error('Failed to change settings: {0}'.format(failed_settings))
return False
log.debug('Settings configured successfully: {0}'.format(settings.keys()))
return True
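# Illustrative sketch (hypothetical pool name): a friendly identityType value is
# sent to Set-ItemProperty in its numeric form (for Server 2008 compatibility),
# e.g.:
#
#     set_container_setting(name='MyTestPool', container='AppPools',
#                           settings={'processModel.identityType': 'NetworkService'})
#     # runs: Set-ItemProperty ... -Name 'processModel.identityType' -Value 2;
#
# Conversely, a numeric value supplied by the caller (such as '2') is mapped back
# to its name when the post-change verification compares old and new settings.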
def list_apps(site):
'''
Get all configured IIS applications for the specified site.
Args:
site (str): The IIS site name.
    Returns:
        dict: A dictionary of the application names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_apps site
'''
ret = dict()
ps_cmd = list()
ps_cmd.append("Get-WebApplication -Site '{0}'".format(site))
ps_cmd.append(r"| Select-Object applicationPool, path, PhysicalPath, preloadEnabled,")
ps_cmd.append(r"@{ Name='name'; Expression={ $_.path.Split('/', 2)[-1] } },")
ps_cmd.append(r"@{ Name='protocols'; Expression={ @( $_.enabledProtocols.Split(',')")
ps_cmd.append(r"| Foreach-Object { $_.Trim() } ) } }")
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
for item in items:
protocols = list()
# If there are no associated protocols, protocols will be an empty dict,
# if there is one protocol, it will be a string, and if there are
# multiple, it will be a dict with 'Count' and 'value' as the keys.
if isinstance(item['protocols'], dict):
if 'value' in item['protocols']:
protocols += item['protocols']['value']
else:
protocols.append(item['protocols'])
ret[item['name']] = {'apppool': item['applicationPool'],
'path': item['path'],
'preload': item['preloadEnabled'],
'protocols': protocols,
'sourcepath': item['PhysicalPath']}
if not ret:
log.warning('No apps found in output: {0}'.format(cmd_ret))
return ret
def create_app(name, site, sourcepath, apppool=None):
'''
Create an IIS application.
.. note::
This function only validates against the application name, and will
return True even if the application already exists with a different
configuration. It will not modify the configuration of an existing
application.
Args:
name (str): The IIS application.
site (str): The IIS site name.
sourcepath (str): The physical path.
apppool (str): The name of the IIS application pool.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_app name='app0' site='site0' sourcepath='C:\\site0' apppool='site0'
'''
current_apps = list_apps(site)
if name in current_apps:
log.debug('Application already present: {0}'.format(name))
return True
# The target physical path must exist.
if not os.path.isdir(sourcepath):
log.error('Path is not present: {0}'.format(sourcepath))
return False
ps_cmd = ['New-WebApplication',
'-Name', "'{0}'".format(name),
'-Site', "'{0}'".format(site),
'-PhysicalPath', "'{0}'".format(sourcepath)]
if apppool:
ps_cmd.extend(['-ApplicationPool', "'{0}'".format(apppool)])
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to create application: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
new_apps = list_apps(site)
if name in new_apps:
log.debug('Application created successfully: {0}'.format(name))
return True
log.error('Unable to create application: {0}'.format(name))
return False
def remove_app(name, site):
'''
Remove an IIS application.
Args:
name (str): The application name.
site (str): The IIS site name.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.remove_app name='app0' site='site0'
'''
current_apps = list_apps(site)
if name not in current_apps:
log.debug('Application already absent: {0}'.format(name))
return True
ps_cmd = ['Remove-WebApplication',
'-Name', "'{0}'".format(name),
'-Site', "'{0}'".format(site)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to remove application: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
new_apps = list_apps(site)
if name not in new_apps:
log.debug('Application removed successfully: {0}'.format(name))
return True
log.error('Unable to remove application: {0}'.format(name))
return False
def list_vdirs(site, app=_DEFAULT_APP):
'''
Get all configured IIS virtual directories for the specified site, or for
the combination of site and application.
Args:
site (str): The IIS site name.
app (str): The IIS application.
Returns:
dict: A dictionary of the virtual directory names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_vdirs site
'''
ret = dict()
ps_cmd = ['Get-WebVirtualDirectory',
'-Site', r"'{0}'".format(site),
'-Application', r"'{0}'".format(app),
'|', "Select-Object PhysicalPath, @{ Name = 'name';",
r"Expression = { $_.path.Split('/')[-1] } }"]
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
for item in items:
ret[item['name']] = {'sourcepath': item['physicalPath']}
if not ret:
log.warning('No vdirs found in output: {0}'.format(cmd_ret))
return ret
def create_vdir(name, site, sourcepath, app=_DEFAULT_APP):
'''
Create an IIS virtual directory.
.. note::
This function only validates against the virtual directory name, and
will return True even if the virtual directory already exists with a
different configuration. It will not modify the configuration of an
existing virtual directory.
Args:
name (str): The virtual directory name.
site (str): The IIS site name.
sourcepath (str): The physical path.
app (str): The IIS application.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_vdir name='vd0' site='site0' sourcepath='C:\\inetpub\\vdirs\\vd0'
'''
current_vdirs = list_vdirs(site, app)
if name in current_vdirs:
log.debug('Virtual directory already present: {0}'.format(name))
return True
# The target physical path must exist.
if not os.path.isdir(sourcepath):
log.error('Path is not present: {0}'.format(sourcepath))
return False
ps_cmd = ['New-WebVirtualDirectory',
'-Name', r"'{0}'".format(name),
'-Site', r"'{0}'".format(site),
'-PhysicalPath', r"'{0}'".format(sourcepath)]
if app != _DEFAULT_APP:
ps_cmd.extend(['-Application', r"'{0}'".format(app)])
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to create virtual directory: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
new_vdirs = list_vdirs(site, app)
if name in new_vdirs:
log.debug('Virtual directory created successfully: {0}'.format(name))
return True
log.error('Unable to create virtual directory: {0}'.format(name))
return False
def remove_vdir(name, site, app=_DEFAULT_APP):
'''
Remove an IIS virtual directory.
Args:
name (str): The virtual directory name.
site (str): The IIS site name.
app (str): The IIS application.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.remove_vdir name='vdir0' site='site0'
'''
current_vdirs = list_vdirs(site, app)
app_path = os.path.join(*app.rstrip('/').split('/'))
if app_path:
app_path = '{0}\\'.format(app_path)
vdir_path = r'IIS:\Sites\{0}\{1}{2}'.format(site, app_path, name)
if name not in current_vdirs:
log.debug('Virtual directory already absent: {0}'.format(name))
return True
# We use Remove-Item here instead of Remove-WebVirtualDirectory, since the
# latter has a bug that causes it to always prompt for user input.
ps_cmd = ['Remove-Item',
'-Path', r"'{0}'".format(vdir_path),
'-Recurse']
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to remove virtual directory: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
new_vdirs = list_vdirs(site, app)
if name not in new_vdirs:
log.debug('Virtual directory removed successfully: {0}'.format(name))
return True
log.error('Unable to remove virtual directory: {0}'.format(name))
return False
def list_backups():
r'''
List the IIS Configuration Backups on the System.
.. versionadded:: 2017.7.0
.. note::
Backups are made when a configuration is edited. Manual backups are
stored in the ``$env:Windir\System32\inetsrv\backup`` folder.
Returns:
dict: A dictionary of IIS Configurations backed up on the system.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_backups
'''
ret = dict()
ps_cmd = ['Get-WebConfigurationBackup',
'|',
'Select Name, CreationDate,',
'@{N="FormattedDate"; E={$_.CreationDate.ToString("G")}}', ]
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
for item in items:
if item['FormattedDate']:
ret[item['Name']] = item['FormattedDate']
else:
ret[item['Name']] = item['CreationDate']
if not ret:
log.warning('No backups found in output: {0}'.format(cmd_ret))
return ret
def create_backup(name):
r'''
Backup an IIS Configuration on the System.
.. versionadded:: 2017.7.0
.. note::
Backups are stored in the ``$env:Windir\System32\inetsrv\backup``
folder.
Args:
name (str): The name to give the backup
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_backup good_config_20170209
'''
if name in list_backups():
raise CommandExecutionError('Backup already present: {0}'.format(name))
ps_cmd = ['Backup-WebConfiguration',
'-Name', "'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to backup web configuration: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
return name in list_backups()
def remove_backup(name):
'''
Remove an IIS Configuration backup from the System.
.. versionadded:: 2017.7.0
Args:
name (str): The name of the backup to remove
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.remove_backup backup_20170209
'''
if name not in list_backups():
log.debug('Backup already removed: {0}'.format(name))
return True
ps_cmd = ['Remove-WebConfigurationBackup',
'-Name', "'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to remove web configuration: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
return name not in list_backups()
def list_worker_processes(apppool):
'''
Returns a list of worker processes that correspond to the passed
application pool.
.. versionadded:: 2017.7.0
Args:
apppool (str): The application pool to query
Returns:
        dict: A dictionary mapping worker process IDs to the application pool name
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_worker_processes 'My App Pool'
'''
ps_cmd = ['Get-ChildItem',
r"'IIS:\AppPools\{0}\WorkerProcesses'".format(apppool)]
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
ret = dict()
for item in items:
ret[item['processId']] = item['appPoolName']
if not ret:
        log.warning('No worker processes found in output: {0}'.format(cmd_ret))
return ret
def get_webapp_settings(name, site, settings):
r'''
Get the value of the setting for the IIS web application.
.. note::
Params are case sensitive.
:param str name: The name of the IIS web application.
    :param str site: The name of the site that contains the web application.
        Example: Default Web Site
    :param list settings: A list of the setting names whose values should be returned.
Available settings: physicalPath, applicationPool, userName, password
Returns:
dict: A dictionary of the provided settings and their values.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt '*' win_iis.get_webapp_settings name='app0' site='Default Web Site'
settings="['physicalPath','applicationPool']"
'''
ret = dict()
pscmd = list()
availableSettings = ('physicalPath', 'applicationPool', 'userName', 'password')
if not settings:
log.warning('No settings provided')
return ret
pscmd.append(r'$Settings = @{};')
    # Verify the setting is in the predefined settings and append the relevant query command per setting key
for setting in settings:
if setting in availableSettings:
if setting == "userName" or setting == "password":
pscmd.append(" $Property = Get-WebConfigurationProperty -Filter \"system.applicationHost/sites/site[@name='{0}']/application[@path='/{1}']/virtualDirectory[@path='/']\"".format(site, name))
pscmd.append(r' -Name "{0}" -ErrorAction Stop | select Value;'.format(setting))
pscmd.append(r' $Property = $Property | Select-Object -ExpandProperty Value;')
pscmd.append(r" $Settings['{0}'] = [String] $Property;".format(setting))
pscmd.append(r' $Property = $Null;')
if setting == "physicalPath" or setting == "applicationPool":
pscmd.append(r" $Property = (get-webapplication {0}).{1};".format(name, setting))
pscmd.append(r" $Settings['{0}'] = [String] $Property;".format(setting))
pscmd.append(r' $Property = $Null;')
else:
availSetStr = ', '.join(availableSettings)
            message = 'Unexpected setting: ' + setting + '. Available settings are: ' + availSetStr
raise SaltInvocationError(message)
pscmd.append(' $Settings')
# Run commands and return data as json
cmd_ret = _srvmgr(cmd=str().join(pscmd), return_json=True)
# Update dict var to return data
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
if isinstance(items, list):
ret.update(items[0])
else:
ret.update(items)
except ValueError:
log.error('Unable to parse return data as Json.')
if None in six.viewvalues(ret):
message = 'Some values are empty - please validate site and web application names. Some commands are case sensitive'
raise SaltInvocationError(message)
return ret
def set_webapp_settings(name, site, settings):
r'''
Configure an IIS application.
.. note::
        This function only configures an existing app.
Params are case sensitive.
:param str name: The IIS application.
:param str site: The IIS site name.
    :param dict settings: A dictionary of the setting names and their values.
:available settings: physicalPath: The physical path of the webapp.
: applicationPool: The application pool for the webapp.
: userName: "connectAs" user
: password: "connectAs" password for user
:return: A boolean representing whether all changes succeeded.
:rtype: bool
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt '*' win_iis.set_webapp_settings name='app0' site='site0' settings="{'physicalPath': 'C:\site0', 'apppool': 'site0'}"
'''
pscmd = list()
current_apps = list_apps(site)
current_sites = list_sites()
availableSettings = ('physicalPath', 'applicationPool', 'userName', 'password')
# Validate params
if name not in current_apps:
msg = "Application" + name + "doesn't exist"
raise SaltInvocationError(msg)
if site not in current_sites:
msg = "Site" + site + "doesn't exist"
raise SaltInvocationError(msg)
if not settings:
msg = "No settings provided"
raise SaltInvocationError(msg)
# Treat all values as strings for the purpose of comparing them to existing values & validate settings exists in predefined settings list
for setting in settings.keys():
if setting in availableSettings:
settings[setting] = str(settings[setting])
else:
availSetStr = ', '.join(availableSettings)
log.error("Unexpected setting: %s ", setting)
log.error("Available settings: %s", availSetStr)
msg = "Unexpected setting:" + setting + " Available settings:" + availSetStr
raise SaltInvocationError(msg)
# Check if settings already configured
current_settings = get_webapp_settings(
name=name, site=site, settings=settings.keys())
if settings == current_settings:
log.warning('Settings already contain the provided values.')
return True
for setting in settings:
# If the value is numeric, don't treat it as a string in PowerShell.
try:
complex(settings[setting])
value = settings[setting]
except ValueError:
value = "'{0}'".format(settings[setting])
# Append relevant update command per setting key
if setting == "userName" or setting == "password":
pscmd.append(" Set-WebConfigurationProperty -Filter \"system.applicationHost/sites/site[@name='{0}']/application[@path='/{1}']/virtualDirectory[@path='/']\"".format(site, name))
pscmd.append(" -Name \"{0}\" -Value {1};".format(setting, value))
if setting == "physicalPath" or setting == "applicationPool":
pscmd.append(r' Set-ItemProperty "IIS:\Sites\{0}\{1}" -Name {2} -Value {3};'.format(site, name, setting, value))
if setting == "physicalPath":
if not os.path.isdir(settings[setting]):
msg = 'Path is not present: ' + settings[setting]
raise SaltInvocationError(msg)
# Run commands
cmd_ret = _srvmgr(pscmd)
# Verify commands completed successfully
if cmd_ret['retcode'] != 0:
msg = 'Unable to set settings for web application {0}'.format(name)
raise SaltInvocationError(msg)
# verify changes
new_settings = get_webapp_settings(
name=name, site=site, settings=settings.keys())
failed_settings = dict()
for setting in settings:
if str(settings[setting]) != str(new_settings[setting]):
failed_settings[setting] = settings[setting]
if failed_settings:
log.error('Failed to change settings: {0}'.format(failed_settings))
return False
log.debug('Settings configured successfully: {0}'.format(settings.keys()))
return True
| 31.200304
| 205
| 0.610512
|
47d7a553f57d4a538ba839c30285fbc10c0ce27c
| 6,588
|
py
|
Python
|
x10mqtt/data/x10mqtt.py
|
mmotley999/addons
|
a12b79a0204495c15a55833a89a21ef362d95fa3
|
[
"Unlicense"
] | 1
|
2021-05-12T01:00:18.000Z
|
2021-05-12T01:00:18.000Z
|
x10mqtt/data/x10mqtt.py
|
mmotley999/addons
|
a12b79a0204495c15a55833a89a21ef362d95fa3
|
[
"Unlicense"
] | 1
|
2022-02-21T05:09:44.000Z
|
2022-03-16T05:31:13.000Z
|
x10mqtt/data/x10mqtt.py
|
mmotley999/addons
|
a12b79a0204495c15a55833a89a21ef362d95fa3
|
[
"Unlicense"
] | 1
|
2021-02-28T18:29:39.000Z
|
2021-02-28T18:29:39.000Z
|
# -------------------------------------------------------------------------------
#
# X10mqtt Home Assistant Addon
#
# This script allows for bridging between MQTT and X10.
#
# It utilizes the 'heyu' command (https://www.heyu.org) for X10 control
# and monitoring.
#
# This was written and tested using a CM11a attached via a USB-to-Serial
# adapter. It may work with a CM17 Firecracker as well.
#
# This does NOT support the USB devices like the SmartHome PowerLinc 1132B,
# or the X10 CM15A.
#
# This only allows ON and OFF commands to X10 appliance modules (or lamp modules).
# Sorry, dimmer control is NOT supported.
#
# -------------------------------------------------------------------------------
import paho.mqtt.client as mqtt
import re
import subprocess
import os
try:
broker = os.environ['MQTTBROKER']
except:
print("Must define MQTT Broker in configuration!")
exit(1)
try:
port = int(os.environ['MQTTPORT'])
except:
print("Must define MQTT port in configuration!")
exit(1)
try:
mqttuser = os.environ['MQTTUSER']
except:
mqttuser = ""
try:
mqttpass = os.environ['MQTTPASS']
except:
mqttpass = ""
# rcvihc stores the house code from the monitor
rcvihc = ""
# cmdtopic for commands. Housecode is appended.
# e.g. 'x10/cmd/A1' to command A1 device.
#
# Payload is either "ON" to turn on a unit, or "OFF" to turn it off
#
# Defaults to 'x10/cmd' if not defined
#
try:
cmdtopic = os.environ['MQTTCMDTOPIC']
except:
cmdtopic = "x10/cmd"
#
# status topic is for status updates
#
# We set the payload to "ON" or "OFF" for status updates
# This was added to support X10 remote buttons in order to keep
# the switch/light state correct in Home Assistant.
#
try:
stattopic = os.environ['MQTTSTATTOPIC']
except:
stattopic = "x10/stat"
#
# Whether a CM17A is in use
#
if os.getenv('CM17') != None:
cm17 = True
else:
cm17 = False
#
# Execute Heyu command
# cmd is one of:
# ON - turn on housecode
# OFF - Turn off housecode
#
#
def execute(client, cmd, housecode):
# For the CM11, send command to heyu as-is (ON or OFF)
# For the CM17, we need to change it to FON or FOFF.
heyucmd = cmd
if cm17:
if cmd.lower() == "on":
heyucmd = "fon"
if cmd.lower() == "off":
heyucmd = "foff"
result = subprocess.run(["heyu", heyucmd.lower(), housecode.lower()])
if result.returncode:
print("Error running heyu, return code: "+str(result.returncode))
print("Device Status Update: "+stattopic+"/"+housecode.lower())
client.publish(stattopic+"/"+housecode.lower(),cmd.upper(),retain=True)
return (result.returncode)
#
# Execute heyu monitor
# This is a long-lived process that monitors for X10 changes,
# like from a remote control.
#
def monitor():
popen = subprocess.Popen(["heyu","monitor"], stdout=subprocess.PIPE, universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ""):
yield stdout_line
popen.stdout.close()
return_code = popen.wait()
if return_code:
        raise subprocess.CalledProcessError(return_code, ["heyu", "monitor"])
#
# The monitor lines are broken out into 2 lines for one event:
# rcvi addr unit - Declares the full unit number
# rcvi func - The function on that unit
#
#
# Monitor rcvi addr unit - save the unit address in a variable for later
#
# Argument: housecode, which is the housecode involved. This is captured from the regex in the main loop and passed.
#
def rcviaddr(housecode):
global rcvihc
# Store the received housecode for when rcvifunc is received
rcvihc = housecode
#
# Monitor rcvi func - the function that was applied to the housecode
#
# This happens after the 'rcvi addr unit', so the housecode that is stored
# from that is what is used.
#
# Argument: func, which is the function (On or Off). This is captured from the regex in the main loop and passed.
#
def rcvifunc(client,func):
global rcvihc
if rcvihc:
print("Remote status change, publishing stat update: "+stattopic+"/"+rcvihc.lower()+" is now "+func.upper())
client.publish(stattopic+"/"+rcvihc.lower(),func.upper(), retain=True)
rcvihc = ""
#
# Define MQTT Connect Callback
#
def on_connect (client, userdata, flags, rc):
# Set up MQTT subscription
    if rc:
        print("Error connecting to MQTT broker rc "+str(rc))
        return
    print("Connected to MQTT broker, result code "+str(rc))
client.subscribe(cmdtopic+"/+")
#
# Callback for MQTT message received
#
def on_message(client, userdata, message):
# Determine the device from the topic
# Topics are cmdtopic/dev, e.g. 'x10/cmd/A1'
# So the last part is the device we want to control
command = str(message.payload.decode('utf-8')).upper()
print("Received: "+message.topic+" "+command)
topiclist = message.topic.split("/")
# Get the homecode and convert it to upper case
hc = topiclist[len(topiclist)-1].upper()
# Check that everything is right
hcpattern = re.compile("^[A-P][0-9]+$")
if command in ["ON", "OFF"] and hcpattern.match(hc):
print("Sending X10 command to homecode "+hc)
result = execute(client, command, hc)
else:
print("Invalid command or home code")
# ---------------------------
# Main program
# ---------------------------
# MQTT connect
print("Establishing MQTT to "+broker+" port "+str(port)+"...")
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
# Check if mqttuser or mqttpass is not blank
# If not, then configure the username and password
if mqttuser and mqttpass:
print("(Using MQTT username "+mqttuser+")")
client.username_pw_set(mqttuser,mqttpass)
try:
client.connect(broker,port)
except:
print("Connection failed. Make sure broker, port, and user is defined correctly")
exit(1)
if cm17:
print("CM17 is in use")
# Start the MQTT loop
print("Waiting for MQTT messages and monitoring for remote changes")
client.loop_start()
# We run 'heyu monitor' in the background to monitor for any X10 changes outside of us (e.g. X10 remotes)
# This way, we can send MQTT status changes if something does change.
# Regular expressions used to catch X10 updates, e.g. from X10 remotes
rercviaddr = re.compile(r"rcvi addr unit.+hu ([A-P][0-9]+)")
rercvifunc = re.compile(r"rcvi func.*(On|Off) :")
# Start the monitor process, which runs all the time.
# Catch any updates we care about so we can handle sending status updates via MQTT
for line in monitor():
addrsearch = rercviaddr.search(line)
funcsearch = rercvifunc.search(line)
if addrsearch:
rcviaddr(str(addrsearch.group(1)))
if funcsearch:
rcvifunc(client,str(funcsearch.group(1)))
| 26.457831
| 118
| 0.67881
|
5308107fed4b6c700ddd72b7307b7462d656039d
| 6,030
|
py
|
Python
|
IRIS_data_download/IRIS_download_support/obspy/io/wav/core.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-03-05T01:03:01.000Z
|
2020-12-17T05:04:07.000Z
|
IRIS_data_download/IRIS_download_support/obspy/io/wav/core.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 4
|
2021-03-31T19:25:55.000Z
|
2021-12-13T20:32:46.000Z
|
IRIS_data_download/IRIS_download_support/obspy/io/wav/core.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-09-08T19:33:40.000Z
|
2021-04-05T09:47:50.000Z
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Filename: core.py
# Purpose: Python Class for transforming seismograms to audio WAV files
# Author: Moritz Beyreuther
# Email: moritz.beyreuther@geophysik.uni-muenchen.de
#
# Copyright (C) 2009-2012 Moritz Beyreuther
# ------------------------------------------------------------------------
"""
WAV bindings to ObsPy core module.
:copyright:
The ObsPy Development Team (devs@obspy.org)
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
from future.utils import native_str
import os
import wave
import numpy as np
from obspy import Stream, Trace
from obspy.core.compatibility import from_buffer
# WAVE data format is unsigned char up to 8bit, and signed int
# for the remaining.
WIDTH2DTYPE = {
1: native_str('<u1'), # unsigned char
2: native_str('<i2'), # signed short int
4: native_str('<i4'), # signed int (int32)
}
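# Illustrative sketch (hypothetical byte string): a 2-byte sample width decodes
# as little-endian signed 16-bit integers, e.g.
#   from_buffer(b'\x01\x00\xff\xff', dtype=WIDTH2DTYPE[2])
# yields array([ 1, -1], dtype=int16).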
def _is_wav(filename):
"""
    Checks whether a file is an audio WAV file or not.
:type filename: str
:param filename: Name of the audio WAV file to be checked.
:rtype: bool
:return: ``True`` if a WAV file.
.. rubric:: Example
>>> _is_wav("/path/to/3cssan.near.8.1.RNON.wav") #doctest: +SKIP
True
"""
try:
fh = wave.open(filename, 'rb')
try:
(_nchannel, width, _rate, _len, _comptype, _compname) = \
fh.getparams()
finally:
fh.close()
except Exception:
return False
if width in [1, 2, 4]:
return True
return False
def _read_wav(filename, headonly=False, **kwargs): # @UnusedVariable
"""
    Reads an audio WAV file and returns an ObsPy Stream object.
    Currently supports uncompressed unsigned char, short integer and
    integer data values. This should cover most WAV files.
.. warning::
This function should NOT be called directly, it registers via the
ObsPy :func:`~obspy.core.stream.read` function, call this instead.
:type filename: str
:param filename: Audio WAV file to be read.
:rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.
.. rubric:: Example
>>> from obspy import read
>>> st = read("/path/to/3cssan.near.8.1.RNON.wav")
>>> print(st) #doctest: +NORMALIZE_WHITESPACE
1 Trace(s) in Stream:
... | 1970-01-01T00:00:00.000000Z - 1970-01-01T00:00:00.371143Z
| 7000.0 Hz, 2599 samples
"""
# read WAV file
fh = wave.open(filename, 'rb')
try:
# header information
(_nchannel, width, rate, length, _comptype, _compname) = fh.getparams()
header = {'sampling_rate': rate, 'npts': length}
if headonly:
return Stream([Trace(header=header)])
if width not in WIDTH2DTYPE.keys():
msg = "Unsupported Format Type, word width %dbytes" % width
raise TypeError(msg)
data = from_buffer(fh.readframes(length), dtype=WIDTH2DTYPE[width])
finally:
fh.close()
return Stream([Trace(header=header, data=data)])
def _write_wav(stream, filename, framerate=7000, rescale=False, width=None,
**kwargs): # @UnusedVariable
"""
    Writes an audio WAV file from a given ObsPy Stream object. The seismogram is
squeezed to audible frequencies.
    The generated WAV sound file is, as a result, really short. The data
    are written uncompressed; the sample width defaults to signed 4-byte
    integers unless ``width`` is given or can be autodetected from the data.
.. warning::
This function should NOT be called directly, it registers via the
the :meth:`~obspy.core.stream.Stream.write` method of an
ObsPy :class:`~obspy.core.stream.Stream` object, call this instead.
:type stream: :class:`~obspy.core.stream.Stream`
:param stream: The ObsPy Stream object to write.
:type filename: str
:param filename: Name of the audio WAV file to write.
:type framerate: int, optional
    :param framerate: Sample rate of the WAV file to use. This will squeeze
the seismogram (default is 7000).
:type rescale: bool, optional
    :param rescale: If True, rescale the data to the maximal representable value
        for the chosen sample width.
:type width: int, optional
    :param width: Sample width in bytes to write: 1 for '<u1', 2 for '<i2' or
        4 for '<i4'. If not given, tries to autodetect the width from the data
        and falls back to 4.
"""
i = 0
base, ext = os.path.splitext(filename)
if width not in WIDTH2DTYPE.keys() and width is not None:
raise TypeError("Unsupported Format Type, word width %dbytes" % width)
for trace in stream:
# try to autodetect width from data, see #791
if width is None:
if trace.data.dtype.str[-2:] in ['u1', 'i2', 'i4']:
tr_width = int(trace.data.dtype.str[-1])
else:
tr_width = 4
else:
tr_width = width
# write WAV file
if len(stream) >= 2:
filename = "%s%03d%s" % (base, i, ext)
w = wave.open(filename, 'wb')
try:
trace.stats.npts = len(trace.data)
# (nchannels, sampwidth, framerate, nframes, comptype, compname)
w.setparams((1, tr_width, framerate, trace.stats.npts, 'NONE',
'not compressed'))
data = trace.data
dtype = WIDTH2DTYPE[tr_width]
if rescale:
# optimal scale, account for +/- and the zero
                maxint = 2 ** (tr_width * 8 - 1) - 1
# upcast for following rescaling
data = data.astype(np.float64)
data = data / abs(data).max() * maxint
data = np.require(data, dtype=dtype)
w.writeframes(data.tostring())
finally:
w.close()
i += 1
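# Hedged usage sketch (not part of the original module): _write_wav is normally
# reached through Stream.write() with format="WAV". The input path is the one
# used in the module's doctests; the output path, framerate and rescale flag
# are illustrative assumptions.
def _example_wav_roundtrip():
    from obspy import read
    st = read("/path/to/3cssan.near.8.1.RNON.wav")
    st.write("/tmp/out.wav", format="WAV", framerate=7000, rescale=True)
    return st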
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
| 33.687151
| 79
| 0.605307
|
3badf1a62191f84d908e681928a6410286bbc214
| 13,529
|
py
|
Python
|
mitmproxy/tools/console/grideditor/base.py
|
0x7c48/mitmproxy
|
f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba
|
[
"MIT"
] | 74
|
2016-03-20T17:39:26.000Z
|
2020-05-12T13:53:23.000Z
|
mitmproxy/tools/console/grideditor/base.py
|
0x7c48/mitmproxy
|
f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba
|
[
"MIT"
] | 2
|
2021-05-11T22:12:31.000Z
|
2022-02-10T23:49:01.000Z
|
mitmproxy/tools/console/grideditor/base.py
|
0x7c48/mitmproxy
|
f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba
|
[
"MIT"
] | 5
|
2016-12-14T14:56:57.000Z
|
2020-03-08T20:58:31.000Z
|
import abc
import copy
import os
import typing
import urwid
from mitmproxy.utils import strutils
from mitmproxy import exceptions
from mitmproxy.tools.console import signals
from mitmproxy.tools.console import layoutwidget
import mitmproxy.tools.console.master # noqa
def read_file(filename: str, escaped: bool) -> typing.AnyStr:
filename = os.path.expanduser(filename)
try:
with open(filename, "r" if escaped else "rb") as f:
d = f.read()
except IOError as v:
raise exceptions.CommandError(v)
if escaped:
try:
d = strutils.escaped_str_to_bytes(d)
except ValueError:
raise exceptions.CommandError("Invalid Python-style string encoding.")
return d
class Cell(urwid.WidgetWrap):
def get_data(self):
"""
Raises:
ValueError, if the current content is invalid.
"""
raise NotImplementedError()
def selectable(self):
return True
class Column(metaclass=abc.ABCMeta):
subeditor: urwid.Edit = None
def __init__(self, heading):
self.heading = heading
@abc.abstractmethod
def Display(self, data) -> Cell:
pass
@abc.abstractmethod
def Edit(self, data) -> Cell:
pass
@abc.abstractmethod
def blank(self) -> typing.Any:
pass
def keypress(self, key: str, editor: "GridEditor") -> typing.Optional[str]:
return key
class GridRow(urwid.WidgetWrap):
def __init__(
self,
focused: typing.Optional[int],
editing: bool,
editor: "GridEditor",
values: typing.Tuple[typing.Iterable[bytes], typing.Container[int]]
) -> None:
self.focused = focused
self.editor = editor
self.edit_col: typing.Optional[Cell] = None
errors = values[1]
self.fields: typing.Sequence[typing.Any] = []
for i, v in enumerate(values[0]):
if focused == i and editing:
self.edit_col = self.editor.columns[i].Edit(v)
self.fields.append(self.edit_col)
else:
w = self.editor.columns[i].Display(v)
if focused == i:
if i in errors:
w = urwid.AttrWrap(w, "focusfield_error")
else:
w = urwid.AttrWrap(w, "focusfield")
elif i in errors:
w = urwid.AttrWrap(w, "field_error")
self.fields.append(w)
fspecs = self.fields[:]
if len(self.fields) > 1:
fspecs[0] = ("fixed", self.editor.first_width + 2, fspecs[0])
w = urwid.Columns(
fspecs,
dividechars=2
)
if focused is not None:
w.set_focus_column(focused)
super().__init__(w)
def keypress(self, s, k):
if self.edit_col:
w = self._w.column_widths(s)[self.focused]
k = self.edit_col.keypress((w,), k)
return k
def selectable(self):
return True
class GridWalker(urwid.ListWalker):
"""
Stores rows as a list of (rows, errors) tuples, where rows is a list
and errors is a set with an entry of each offset in rows that is an
error.
"""
def __init__(
self,
lst: typing.Iterable[list],
editor: "GridEditor"
) -> None:
self.lst: typing.Sequence[typing.Tuple[typing.Any, typing.Set]] = [(i, set()) for i in lst]
self.editor = editor
self.focus = 0
self.focus_col = 0
self.edit_row: typing.Optional[GridRow] = None
def _modified(self):
self.editor.show_empty_msg()
return super()._modified()
def add_value(self, lst):
self.lst.append(
(lst[:], set())
)
self._modified()
def get_current_value(self):
if self.lst:
return self.lst[self.focus][0][self.focus_col]
def set_current_value(self, val):
errors = self.lst[self.focus][1]
emsg = self.editor.is_error(self.focus_col, val)
if emsg:
signals.status_message.send(message=emsg, expire=5)
errors.add(self.focus_col)
else:
errors.discard(self.focus_col)
self.set_value(val, self.focus, self.focus_col, errors)
def set_value(self, val, focus, focus_col, errors=None):
if not errors:
errors = set([])
row = list(self.lst[focus][0])
row[focus_col] = val
self.lst[focus] = [tuple(row), errors]
self._modified()
def delete_focus(self):
if self.lst:
del self.lst[self.focus]
self.focus = min(len(self.lst) - 1, self.focus)
self._modified()
def _insert(self, pos):
self.focus = pos
self.lst.insert(
self.focus,
([c.blank() for c in self.editor.columns], set([]))
)
self.focus_col = 0
self.start_edit()
def insert(self):
return self._insert(self.focus)
def add(self):
return self._insert(min(self.focus + 1, len(self.lst)))
def start_edit(self):
col = self.editor.columns[self.focus_col]
if self.lst and not col.subeditor:
self.edit_row = GridRow(
self.focus_col, True, self.editor, self.lst[self.focus]
)
self._modified()
def stop_edit(self):
if self.edit_row:
try:
val = self.edit_row.edit_col.get_data()
except ValueError:
return
self.edit_row = None
self.set_current_value(val)
def left(self):
self.focus_col = max(self.focus_col - 1, 0)
self._modified()
def right(self):
self.focus_col = min(self.focus_col + 1, len(self.editor.columns) - 1)
self._modified()
def tab_next(self):
self.stop_edit()
if self.focus_col < len(self.editor.columns) - 1:
self.focus_col += 1
elif self.focus != len(self.lst) - 1:
self.focus_col = 0
self.focus += 1
self._modified()
def get_focus(self):
if self.edit_row:
return self.edit_row, self.focus
elif self.lst:
return GridRow(
self.focus_col,
False,
self.editor,
self.lst[self.focus]
), self.focus
else:
return None, None
def set_focus(self, focus):
self.stop_edit()
self.focus = focus
self._modified()
def get_next(self, pos):
if pos + 1 >= len(self.lst):
return None, None
return GridRow(None, False, self.editor, self.lst[pos + 1]), pos + 1
def get_prev(self, pos):
if pos - 1 < 0:
return None, None
return GridRow(None, False, self.editor, self.lst[pos - 1]), pos - 1
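# Illustrative sketch only: the (row, errors) layout that GridWalker's
# docstring describes. The header names and values below are made up.
def _example_gridwalker_rows():
    return [
        (("Content-Type", "text/html"), set()),       # row with no flagged columns
        (("Content-Length", "not-a-number"), {1}),    # column offset 1 flagged as an error
    ]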
class GridListBox(urwid.ListBox):
def __init__(self, lw):
super().__init__(lw)
FIRST_WIDTH_MAX = 40
class BaseGridEditor(urwid.WidgetWrap):
title = ""
keyctx = "grideditor"
def __init__(
self,
master: "mitmproxy.tools.console.master.ConsoleMaster",
title,
columns,
value: typing.Any,
callback: typing.Callable[..., None],
*cb_args,
**cb_kwargs
) -> None:
value = self.data_in(copy.deepcopy(value))
self.master = master
self.title = title
self.columns = columns
self.value = value
self.callback = callback
self.cb_args = cb_args
self.cb_kwargs = cb_kwargs
first_width = 20
if value:
for r in value:
assert len(r) == len(self.columns)
first_width = max(len(r), first_width)
self.first_width = min(first_width, FIRST_WIDTH_MAX)
h = None
if any(col.heading for col in self.columns):
headings = []
for i, col in enumerate(self.columns):
c = urwid.Text(col.heading)
if i == 0 and len(self.columns) > 1:
headings.append(("fixed", first_width + 2, c))
else:
headings.append(c)
h = urwid.Columns(
headings,
dividechars=2
)
h = urwid.AttrWrap(h, "heading")
self.walker = GridWalker(self.value, self)
self.lb = GridListBox(self.walker)
w = urwid.Frame(self.lb, header=h)
super().__init__(w)
self.show_empty_msg()
def layout_popping(self):
res = []
for i in self.walker.lst:
if not i[1] and any([x for x in i[0]]):
res.append(i[0])
self.callback(self.data_out(res), *self.cb_args, **self.cb_kwargs)
def show_empty_msg(self):
if self.walker.lst:
self._w.set_footer(None)
else:
self._w.set_footer(
urwid.Text(
[
("highlight", "No values - you should add some. Press "),
("key", "?"),
("highlight", " for help."),
]
)
)
def set_subeditor_value(self, val, focus, focus_col):
self.walker.set_value(val, focus, focus_col)
def keypress(self, size, key):
if self.walker.edit_row:
if key == "esc":
self.walker.stop_edit()
elif key == "tab":
pf, pfc = self.walker.focus, self.walker.focus_col
self.walker.tab_next()
if self.walker.focus == pf and self.walker.focus_col != pfc:
self.walker.start_edit()
else:
self._w.keypress(size, key)
return None
column = self.columns[self.walker.focus_col]
if key == "m_start":
self.walker.set_focus(0)
elif key == "m_next":
self.walker.tab_next()
elif key == "m_end":
self.walker.set_focus(len(self.walker.lst) - 1)
elif key == "left":
self.walker.left()
elif key == "right":
self.walker.right()
elif column.keypress(key, self) and not self.handle_key(key):
return self._w.keypress(size, key)
def data_out(self, data: typing.Sequence[list]) -> typing.Any:
"""
Called on raw list data, before data is returned through the
callback.
"""
return data
def data_in(self, data: typing.Any) -> typing.Iterable[list]:
"""
Called to prepare provided data.
"""
return data
def is_error(self, col: int, val: typing.Any) -> typing.Optional[str]:
"""
Return None, or a string error message.
"""
return None
def handle_key(self, key):
return False
def cmd_add(self):
self.walker.add()
def cmd_insert(self):
self.walker.insert()
def cmd_delete(self):
self.walker.delete_focus()
def cmd_read_file(self, path):
self.walker.set_current_value(read_file(path, False))
def cmd_read_file_escaped(self, path):
self.walker.set_current_value(read_file(path, True))
def cmd_spawn_editor(self):
o = self.walker.get_current_value()
if o is not None:
n = self.master.spawn_editor(o)
n = strutils.clean_hanging_newline(n)
self.walker.set_current_value(n)
class GridEditor(BaseGridEditor):
title: str = None
columns: typing.Sequence[Column] = None
keyctx = "grideditor"
def __init__(
self,
master: "mitmproxy.tools.console.master.ConsoleMaster",
value: typing.Any,
callback: typing.Callable[..., None],
*cb_args,
**cb_kwargs
) -> None:
super().__init__(
master,
self.title,
self.columns,
value,
callback,
*cb_args,
**cb_kwargs
)
class FocusEditor(urwid.WidgetWrap, layoutwidget.LayoutWidget):
"""
A specialised GridEditor that edits the current focused flow.
"""
keyctx = "grideditor"
def __init__(self, master):
self.master = master
def call(self, v, name, *args, **kwargs):
f = getattr(v, name, None)
if f:
f(*args, **kwargs)
def get_data(self, flow):
"""
Retrieve the data to edit from the current flow.
"""
raise NotImplementedError
def set_data(self, vals, flow):
"""
Set the current data on the flow.
"""
raise NotImplementedError
def set_data_update(self, vals, flow):
self.set_data(vals, flow)
signals.flow_change.send(self, flow=flow)
def key_responder(self):
return self._w
def layout_popping(self):
self.call(self._w, "layout_popping")
def layout_pushed(self, prev):
if self.master.view.focus.flow:
self._w = BaseGridEditor(
self.master,
self.title,
self.columns,
self.get_data(self.master.view.focus.flow),
self.set_data_update,
self.master.view.focus.flow,
)
else:
self._w = urwid.Pile([])
| 28.422269
| 99
| 0.541873
|
148c3ff3dc4f27f4e66cc0026a89a1ad41d67887
| 11,156
|
py
|
Python
|
code/python/FactSetNER/v1/fds/sdk/FactSetNER/model/request.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/FactSetNER/v1/fds/sdk/FactSetNER/model/request.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/FactSetNER/v1/fds/sdk/FactSetNER/model/request.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
NER API
Extract named entities and their FactSet entity IDs from given document text. # noqa: E501
The version of the OpenAPI document: 1.5.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.FactSetNER.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.FactSetNER.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.FactSetNER.model.input import Input
globals()['Input'] = Input
class Request(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'input': (Input,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'input': 'input', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Request - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
input (Input): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Request - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
input (Input): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 42.580153
| 121
| 0.568932
|
1d818cdbaca29dbb439ba6f5e05ebb782668ae68
| 118
|
py
|
Python
|
example/__init__.py
|
theo-l/py_settings
|
8780161d4f633b480bc5e834f72f11e8e1460268
|
[
"Apache-2.0"
] | null | null | null |
example/__init__.py
|
theo-l/py_settings
|
8780161d4f633b480bc5e834f72f11e8e1460268
|
[
"Apache-2.0"
] | null | null | null |
example/__init__.py
|
theo-l/py_settings
|
8780161d4f633b480bc5e834f72f11e8e1460268
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@Creation: 22/06/2020 22:08
@Author: liang
@File: __init__.py
"""
| 16.857143
| 28
| 0.610169
|
6f2195d168660866fcd344ff054f461cfe16a2f2
| 4,568
|
py
|
Python
|
object_detection/serve_model.py
|
moxel/tf-models
|
0a8758e091ed3773d0ba6650bcf895aa0596fa81
|
[
"Apache-2.0"
] | null | null | null |
object_detection/serve_model.py
|
moxel/tf-models
|
0a8758e091ed3773d0ba6650bcf895aa0596fa81
|
[
"Apache-2.0"
] | null | null | null |
object_detection/serve_model.py
|
moxel/tf-models
|
0a8758e091ed3773d0ba6650bcf895aa0596fa81
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
sys.path.append('..')
from collections import defaultdict
from io import StringIO, BytesIO
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from PIL import Image
import scipy.misc
from utils import label_map_util
from utils import visualization_utils as vis_util
from flask import Flask, request, jsonify
import subprocess
import uuid
import base64
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
print('Loading model...')
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
print('Loading label...')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# helper code.
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
app = Flask(__name__)
@app.route('/', methods=['GET'])
def health_check():
return jsonify({
'status': 'OK'
})
@app.route('/', methods=['POST'])
def detect():
data = request.json
uid = str(uuid.uuid4())[:10]
image_binary = base64.b64decode(data['image'])
image_f = BytesIO()
image_f.write(image_binary)
image_f.seek(0)
image = Image.open(image_f)
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
vis_file = BytesIO()
scipy.misc.imsave(vis_file, image_np, format='png')
vis_file.seek(0)
vis_binary = vis_file.read()
return jsonify({
'boxes': list(map(lambda l: [float(x) for x in l], boxes[0])),
#'scores': list(scores[0]),
#'classes': list(classes[0]),
#'num_detections': int(num_detections),
'vis': base64.b64encode(vis_binary).decode('utf-8'),
})
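# Hedged client sketch for the endpoint above: posts a base64-encoded image and
# saves the returned visualization. The image path, output path and URL are
# illustrative assumptions, and the `requests` package is assumed to be
# available on the client side.
def _example_client_request(image_path='example.jpg', url='http://localhost:5900/'):
    import requests
    with open(image_path, 'rb') as f:
        payload = {'image': base64.b64encode(f.read()).decode('utf-8')}
    response = requests.post(url, json=payload).json()
    # 'boxes' holds normalized box coordinates, 'vis' a base64-encoded PNG
    # with the detections drawn in.
    with open('detection.png', 'wb') as out:
        out.write(base64.b64decode(response['vis']))
    return response['boxes']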
if __name__ == '__main__':
app.run(debug=False, port=5900, host='0.0.0.0')
| 34.345865
| 122
| 0.676226
|
d18486a4801d70a1fa822e69a774f4c70cd790a3
| 272
|
py
|
Python
|
util/__init__.py
|
garion9013/shrinkbench
|
c460cbc371ef2673b2a3f8498ca7a8aeb60d78bb
|
[
"MIT"
] | null | null | null |
util/__init__.py
|
garion9013/shrinkbench
|
c460cbc371ef2673b2a3f8498ca7a8aeb60d78bb
|
[
"MIT"
] | null | null | null |
util/__init__.py
|
garion9013/shrinkbench
|
c460cbc371ef2673b2a3f8498ca7a8aeb60d78bb
|
[
"MIT"
] | null | null | null |
from .automap import AutoMap
from .color import printc, highlight, colors
from .csvlogger import CSVLogger
from .online import OnlineStats, OnlineStatsMap
import json
def is_jsonable(x):
try:
json.dumps(x)
return True
except (TypeError, ValueError, OverflowError):
return False
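# Illustrative sketch: plain containers serialize, objects json cannot handle
# (such as sets) do not.
def _example_is_jsonable():
    assert is_jsonable({"epochs": 10, "lr": 0.1})
    assert not is_jsonable({1, 2, 3})  # json.dumps raises TypeError on sets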
| 20.923077
| 47
| 0.713235
|
f705a8ee33fb9360f9cccd2c289e63ed88006920
| 6,249
|
py
|
Python
|
applications/welcome/models/menu.py
|
Gorang-Maniar/DGD
|
c7b2624c0d0bb0127214ec3804acbe2cc70f8ce0
|
[
"BSD-3-Clause"
] | 8
|
2018-04-13T14:54:02.000Z
|
2021-03-04T10:58:09.000Z
|
webui/applications/grid/models/menu.py
|
pouyana/teireader
|
ac0a92d8b2e570eae1c0a03fd35a7b281eccd250
|
[
"MIT"
] | 39
|
2018-03-23T09:25:38.000Z
|
2022-03-23T15:22:15.000Z
|
webui/applications/grid/models/menu.py
|
pouyana/teireader
|
ac0a92d8b2e570eae1c0a03fd35a7b281eccd250
|
[
"MIT"
] | 3
|
2019-04-09T03:49:21.000Z
|
2020-03-05T03:51:25.000Z
|
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## Customize your APP title, subtitle and menus here
#########################################################################
response.logo = A(B('web',SPAN(2),'py'),XML('™ '),
_class="brand",_href="http://www.web2py.com/")
response.title = request.application.replace('_',' ').title()
response.subtitle = ''
## read more at http://dev.w3.org/html5/markup/meta.name.html
response.meta.author = 'Your Name <you@example.com>'
response.meta.description = 'a cool new app'
response.meta.keywords = 'web2py, python, framework'
response.meta.generator = 'Web2py Web Framework'
## your http://google.com/analytics id
response.google_analytics_id = None
#########################################################################
## this is the main application menu add/remove items as required
#########################################################################
response.menu = [
(T('Home'), False, URL('default', 'index'), [])
]
DEVELOPMENT_MENU = True
#########################################################################
## provide shortcuts for development. remove in production
#########################################################################
def _():
# shortcuts
app = request.application
ctr = request.controller
# useful links to internal and external resources
response.menu += [
(SPAN('web2py', _class='highlighted'), False, 'http://web2py.com', [
(T('My Sites'), False, URL('admin', 'default', 'site')),
(T('This App'), False, URL('admin', 'default', 'design/%s' % app), [
(T('Controller'), False,
URL(
'admin', 'default', 'edit/%s/controllers/%s.py' % (app, ctr))),
(T('View'), False,
URL(
'admin', 'default', 'edit/%s/views/%s' % (app, response.view))),
(T('Layout'), False,
URL(
'admin', 'default', 'edit/%s/views/layout.html' % app)),
(T('Stylesheet'), False,
URL(
'admin', 'default', 'edit/%s/static/css/web2py.css' % app)),
(T('DB Model'), False,
URL(
'admin', 'default', 'edit/%s/models/db.py' % app)),
(T('Menu Model'), False,
URL(
'admin', 'default', 'edit/%s/models/menu.py' % app)),
(T('Database'), False, URL(app, 'appadmin', 'index')),
(T('Errors'), False, URL(
'admin', 'default', 'errors/' + app)),
(T('About'), False, URL(
'admin', 'default', 'about/' + app)),
]),
('web2py.com', False, 'http://www.web2py.com', [
(T('Download'), False,
'http://www.web2py.com/examples/default/download'),
(T('Support'), False,
'http://www.web2py.com/examples/default/support'),
(T('Demo'), False, 'http://web2py.com/demo_admin'),
(T('Quick Examples'), False,
'http://web2py.com/examples/default/examples'),
(T('FAQ'), False, 'http://web2py.com/AlterEgo'),
(T('Videos'), False,
'http://www.web2py.com/examples/default/videos/'),
(T('Free Applications'),
False, 'http://web2py.com/appliances'),
(T('Plugins'), False, 'http://web2py.com/plugins'),
(T('Layouts'), False, 'http://web2py.com/layouts'),
(T('Recipes'), False, 'http://web2pyslices.com/'),
(T('Semantic'), False, 'http://web2py.com/semantic'),
]),
(T('Documentation'), False, 'http://www.web2py.com/book', [
(T('Preface'), False,
'http://www.web2py.com/book/default/chapter/00'),
(T('Introduction'), False,
'http://www.web2py.com/book/default/chapter/01'),
(T('Python'), False,
'http://www.web2py.com/book/default/chapter/02'),
(T('Overview'), False,
'http://www.web2py.com/book/default/chapter/03'),
(T('The Core'), False,
'http://www.web2py.com/book/default/chapter/04'),
(T('The Views'), False,
'http://www.web2py.com/book/default/chapter/05'),
(T('Database'), False,
'http://www.web2py.com/book/default/chapter/06'),
(T('Forms and Validators'), False,
'http://www.web2py.com/book/default/chapter/07'),
(T('Email and SMS'), False,
'http://www.web2py.com/book/default/chapter/08'),
(T('Access Control'), False,
'http://www.web2py.com/book/default/chapter/09'),
(T('Services'), False,
'http://www.web2py.com/book/default/chapter/10'),
(T('Ajax Recipes'), False,
'http://www.web2py.com/book/default/chapter/11'),
(T('Components and Plugins'), False,
'http://www.web2py.com/book/default/chapter/12'),
(T('Deployment Recipes'), False,
'http://www.web2py.com/book/default/chapter/13'),
(T('Other Recipes'), False,
'http://www.web2py.com/book/default/chapter/14'),
(T('Buy this book'), False,
'http://stores.lulu.com/web2py'),
]),
(T('Community'), False, None, [
(T('Groups'), False,
'http://www.web2py.com/examples/default/usergroups'),
(T('Twitter'), False, 'http://twitter.com/web2py'),
(T('Live Chat'), False,
'http://webchat.freenode.net/?channels=web2py'),
]),
(T('Plugins'), False, None, [
('plugin_wiki', False,
'http://web2py.com/examples/default/download'),
(T('Other Plugins'), False,
'http://web2py.com/plugins'),
(T('Layout Plugins'),
False, 'http://web2py.com/layouts'),
])
]
)]
if DEVELOPMENT_MENU: _()
if "auth" in locals(): auth.wikimenu()
| 44.319149
| 79
| 0.478957
|
57a0d5307d414a79430cf94b3c5be2d1bef4c583
| 450
|
py
|
Python
|
examples/python/cpu/tensors/ocean_fmod_01.py
|
kant/ocean-tensor-package
|
fb3fcff8bba7f4ef6cd8b8d02f0e1be1258da02d
|
[
"Apache-2.0"
] | 27
|
2018-08-16T21:32:49.000Z
|
2021-11-30T10:31:08.000Z
|
examples/python/cpu/tensors/ocean_fmod_01.py
|
kant/ocean-tensor-package
|
fb3fcff8bba7f4ef6cd8b8d02f0e1be1258da02d
|
[
"Apache-2.0"
] | null | null | null |
examples/python/cpu/tensors/ocean_fmod_01.py
|
kant/ocean-tensor-package
|
fb3fcff8bba7f4ef6cd8b8d02f0e1be1258da02d
|
[
"Apache-2.0"
] | 13
|
2018-08-17T17:33:16.000Z
|
2021-11-30T10:31:09.000Z
|
import pyOcean_cpu as ocean
a = [-20,20,-20,20]
b = [3,3,-3,-3]
c = [1,2,-2,-1]
print(ocean.mod(ocean.int64(a),ocean.int64(b)))
print(ocean.mod(ocean.float(a),ocean.float(b)))
print(ocean.mod(ocean.double(a),ocean.double(b)))
print(ocean.int64(c))
print("")
c = [-2,2,-2,2]
print(ocean.fmod(ocean.int64(a), ocean.int64(b)))
print(ocean.fmod(ocean.float(a),ocean.float(b)))
print(ocean.fmod(ocean.double(a),ocean.double(b)))
print(ocean.int64(c))
| 23.684211
| 50
| 0.671111
|
b095a08f365591c8db07615a90cf461d3879dce2
| 7,829
|
py
|
Python
|
tools/convert_vgm_to_pvm.py
|
c64scene-ar/64k-ought-to-be-enough
|
d97997ecd5ddb598bd58fa61da76e4b0c10c5bc1
|
[
"MIT"
] | 4
|
2018-09-27T06:44:33.000Z
|
2019-03-16T13:37:01.000Z
|
tools/convert_vgm_to_pvm.py
|
c64scene-ar/64k-ought-to-be-enough
|
d97997ecd5ddb598bd58fa61da76e4b0c10c5bc1
|
[
"MIT"
] | null | null | null |
tools/convert_vgm_to_pvm.py
|
c64scene-ar/64k-ought-to-be-enough
|
d97997ecd5ddb598bd58fa61da76e4b0c10c5bc1
|
[
"MIT"
] | 2
|
2018-10-16T01:49:09.000Z
|
2020-08-11T12:35:55.000Z
|
#!/usr/bin/env python3
# -----------------------------------------------------------------------------
# converts VGM files to a more compact file format (optimized format for Tandy)
# -----------------------------------------------------------------------------
"""
Tool to convert VGM (Video Game Music) to PVM (Player VGM Music)
It is the same, but smaller, around 30% smaller.
For the moment, the only supported chip is SN76489 (Sega Master System in
Deflemask).
"""
import argparse
import os
import struct
import sys
__docformat__ = 'restructuredtext'
class ToPVM:
"""The class that does all the conversions"""
# 3 MSB bits are designed for commands
# 5 LSB bits are for data for the command
DATA = 0b00000000 # 000xxxxx (xxxxx = len of data)
DATA_EXTRA = 0b00100000 # 001----- next byte will have the data len
DELAY = 0b01000000 # 010xxxxx (xxxxx = cycles to delay)
DELAY_EXTRA = 0b01100000 # 011----- next byte will have the delay
END = 0b10000000 # 100-----
def __init__(self, vgm_fd):
self._vgm_fd = vgm_fd
path = os.path.dirname(vgm_fd.name)
basename = os.path.basename(vgm_fd.name)
name = '%s.%s' % (os.path.splitext(basename)[0], 'pvm')
self._out_filename = os.path.join(path, name)
self._output_data = bytearray()
self._current_port_data = bytearray()
self._should_loop = False
self._pvm_loop_offset = 0
def run(self):
"""Execute the conversor."""
with open(self._out_filename, 'w+') as fd_out:
# FIXME: Assuming VGM version is 1.50 (64 bytes of header)
header = bytearray(self._vgm_fd.read(0x40))
print('Converting: %s -> %s...' %
(self._vgm_fd.name, self._out_filename), end='')
# 0x00: "Vgm " (0x56 0x67 0x6d 0x20) file identification (32 bits)
if header[:4].decode('utf-8') != 'Vgm ':
print(' failed. Not a valid VGM file')
return
# 0x08: Version number (32 bits)
# Version 1.50 is stored as 0x00000150, i.e. written to the file as the bytes 0x50 0x01 0x00 0x00.
# This is used for backwards compatibility in players, and defines which
# header values are valid.
vgm_version = struct.unpack_from("<I", header, 8)[0]
if vgm_version != 0x150:
print(' failed. Invalid VGM version: %x (not 0x150)' %
vgm_version)
return
# 0x0c: SN76489 clock (32 bits)
# Input clock rate in Hz for the SN76489 PSG chip. A typical value is
# 3579545. It should be 0 if there is no PSG chip used.
sn76489_clock = struct.unpack_from("<I", header, 12)[0]
if sn76489_clock != 3579545:
print(' failed. Not a VGM SN76489 song')
return
# 0x04: Eof offset (32 bits)
# Relative offset to end of file (i.e. file length - 4).
# This is mainly used to find the next track when concatenating
# player stubs and multiple files.
file_len = struct.unpack_from("<I", header, 4)[0]
data = bytearray(self._vgm_fd.read(file_len + 4 - 0x40))
# 0x1c: Loop offset (32 bits)
# Relative offset to loop point, or 0 if no loop.
# For example, if the data for the one-off intro to a song was in bytes
# 0x0040-0x3fff of the file, but the main looping section started at
# 0x4000, this would contain the value 0x4000-0x1c = 0x00003fe4.
loop = struct.unpack_from("<I", header, 0x1c)[0]
self._should_loop = True if loop != 0 else False
vgm_loop_offset = loop + 0x1c - 0x40
i = 0
while i < len(data):
# when looping, flush RLE since loop should jump to start
# of valid code
if self._should_loop and i == vgm_loop_offset:
self.flush_current_port_data()
self._pvm_loop_offset = len(self._output_data)
if data[i] == 0x50:
self.add_port_data(data[i+1])
i = i+2
elif data[i] == 0x61:
# unpack little endian unsigned short
delay = struct.unpack_from("<H", data, i+1)[0]
self.add_n_delay(delay)
i = i+3
elif data[i] == 0x62:
self.add_single_delay()
i = i+1
elif data[i] == 0x66:
self.add_end()
break
else:
raise Exception('Unknown value: data[0x%x] = 0x%x' %
(i, data[i]))
self.prepend_header()
old_len = file_len + 4
new_len = len(self._output_data)
if new_len < 65536:
fd_out.buffer.write(self._output_data)
print(' done (%d%% smaller)' % (100-(100*new_len/old_len)))
else:
print(' failed. converted size %d > 65535' % new_len)
def prepend_header(self):
HEADER_LEN = 16
VERSION_LO = 0
VERSION_HI = 1
header = bytearray()
# signature: 4 bytes
header += 'PVM '.encode('utf-8')
# total len: 4 bytes
l = len(self._output_data) + HEADER_LEN
total_len = struct.pack("<I", l)
header += total_len
# version: 2 bytes. minor, major
header.append(VERSION_LO)
header.append(VERSION_HI)
# flags: 2 bytes
# which procesor is supported
# either PAL/NTSC
# clock
# should loop
flags = 0x0
if self._should_loop:
flags |= 0x1
header.append(0)
header.append(flags)
# loop offset: 4 bytes
loop_offset = struct.pack("<I", self._pvm_loop_offset)
header += loop_offset
self._output_data = header + self._output_data
def add_port_data(self, byte_data):
self._current_port_data.append(byte_data)
def add_single_delay(self):
self.flush_current_port_data()
self._output_data.append(self.DELAY | 1)
def add_n_delay(self, delay):
# 0x02df == 735, the number of 1/44100 s sample ticks in one 1/60 s frame
delay_val = delay // 0x02df
if delay_val == 0:
return
self.flush_current_port_data()
if delay_val > 31:
self._output_data.append(self.DELAY_EXTRA)
self._output_data.append(delay_val)
else:
self._output_data.append(self.DELAY | delay_val)
def add_end(self):
self._output_data.append(self.END)
def flush_current_port_data(self):
l = len(self._current_port_data)
if l == 0:
return
if l > 31:
self._output_data.append(self.DATA_EXTRA)
self._output_data.append(l)
else:
self._output_data.append(self.DATA | l)
self._output_data = self._output_data + self._current_port_data
self._current_port_data = bytearray()
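# Worked sketch of the PVM command encoding implemented by ToPVM above: the top
# 3 bits select the command, the low 5 bits carry inline data. The byte values
# below are illustrative, not taken from a real song.
def _example_pvm_encoding():
    data_run_of_5 = ToPVM.DATA | 5        # 0b00000101 -> 5 data bytes follow
    short_delay_of_3 = ToPVM.DELAY | 3    # 0b01000011 -> wait 3 frames
    long_delay = [ToPVM.DELAY_EXTRA, 40]  # delays > 31 frames use an extra byte
    end_marker = ToPVM.END                # 0b10000000 -> end of song
    return data_run_of_5, short_delay_of_3, long_delay, end_marker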
def parse_args():
"""Parse the arguments."""
parser = argparse.ArgumentParser(
description='Converts VGM to PVM',
epilog="""Example:
$ %(prog)s my_music.vgm
""")
parser.add_argument('filenames',
metavar='<filename>',
nargs='+',
type=argparse.FileType('rb'),
help='files to convert to pvm format')
args = parser.parse_args()
return args
def main():
"""Main function."""
args = parse_args()
print('VGM to PVM v0.2 - riq/pvm - http://pungas.space\n')
for fd in args.filenames:
ToPVM(fd).run()
if __name__ == "__main__":
main()
| 34.03913
| 85
| 0.544897
|
c75bfcbc0e16be2452d3047d1c4377b1784e0540
| 4,725
|
py
|
Python
|
core/python/setup.py
|
Zeleznyj/spirit
|
5e23bf3be5aa4bacf5aae24514b0b22cbd395619
|
[
"MIT"
] | null | null | null |
core/python/setup.py
|
Zeleznyj/spirit
|
5e23bf3be5aa4bacf5aae24514b0b22cbd395619
|
[
"MIT"
] | null | null | null |
core/python/setup.py
|
Zeleznyj/spirit
|
5e23bf3be5aa4bacf5aae24514b0b22cbd395619
|
[
"MIT"
] | null | null | null |
import codecs
import os
import platform
import re
import shutil
import stat
import subprocess
import sys
import datetime
from distutils.util import get_platform
from setuptools import setup, Command
from wheel.bdist_wheel import bdist_wheel as bdist_wheel_
NAME = "spirit"
PACKAGES = ['spirit', 'spirit.parameters']
META_PATH = os.path.join("spirit", "__init__.py")
KEYWORDS = ["Spirit", "Spin Dynamics"]
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: C",
"Programming Language :: C++",
"Programming Language :: Python",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries :: Python Modules",
]
INSTALL_REQUIRES = ["numpy"]
###############################################################################
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
def find_meta(meta):
"""
Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta),
META_FILE, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
def get_git_commit_datetime():
try:
commit_hash = subprocess.check_output("git rev-parse HEAD", shell=True, stderr=subprocess.STDOUT).decode("utf-8").strip()
commit_datetime = subprocess.check_output("git show -s --format=%ci "+commit_hash, shell=True, stderr=subprocess.STDOUT).decode("utf-8").strip()
print(commit_datetime)
datetime_object = datetime.datetime.strptime(commit_datetime, '%Y-%m-%d %H:%M:%S +%f')
print("{:%Y%m%d%H%M%S}".format(datetime_object))
return "{:%Y%m%d%H%M%S}".format(datetime_object)
except subprocess.CalledProcessError as cpe:
print(cpe.output)
return "00000000000000"
import unittest
def my_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('test', pattern='*.py')
return test_suite
class bdist_wheel(bdist_wheel_):
def finalize_options(self):
from sys import platform as _platform
platform_name = get_platform()
if _platform == "linux" or _platform == "linux2":
# Linux
platform_name = 'manylinux1_x86_64'
bdist_wheel_.finalize_options(self)
self.universal = True
self.plat_name_supplied = True
self.plat_name = platform_name
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
if __name__ == "__main__":
# If the environment variable SPIRIT_ADD_VERSION_SUFFIX is set to a truthy value,
# a timestamped development suffix is appended to the package version number.
version_suffix = ""
add_version_suffix = os.environ.get("SPIRIT_ADD_VERSION_SUFFIX", "")
if add_version_suffix.lower() in ("yes", "true", "t", "1"):
timepoint_string = get_git_commit_datetime()
if timepoint_string == "00000000000000":
timepoint_string = "{:%Y%m%d%H%M}".format(datetime.datetime.now())
version_suffix = ".dev"+timepoint_string
print("setup.py: package version suffix = ", version_suffix)
# Setup the package info
setup(
name = NAME,
description = find_meta("description"),
long_description = read('README.md'),
license = find_meta("license"),
url = find_meta("uri"),
version = find_meta("version")+version_suffix,
author = find_meta("author"),
author_email = find_meta("email"),
maintainer = find_meta("author"),
maintainer_email = find_meta("email"),
keywords = KEYWORDS,
packages = PACKAGES,
classifiers = CLASSIFIERS,
install_requires = INSTALL_REQUIRES,
package_data = {
'spirit': ['libSpirit.dylib', 'libSpirit.so', 'libSpirit.dll'],
},
cmdclass = {'bdist_wheel': bdist_wheel, 'clean': CleanCommand},
test_suite = 'setup.my_test_suite',
)
| 33.992806
| 152
| 0.622011
|
bdfe8d6a5a1ce8d9ef1c9f18994293f52515480e
| 5,862
|
py
|
Python
|
venv/lib/python3.7/site-packages/rqdatac/services/orm/balance_sheet_sql.py
|
CatTiger/vnpy
|
7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/rqdatac/services/orm/balance_sheet_sql.py
|
CatTiger/vnpy
|
7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b
|
[
"MIT"
] | 1
|
2020-04-21T02:42:32.000Z
|
2020-04-21T02:42:32.000Z
|
venv/lib/python3.7/site-packages/rqdatac/services/orm/balance_sheet_sql.py
|
CatTiger/vnpy
|
7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from sqlalchemy import Numeric, Column
from .fundamental_base_sql import FundamentalBase
class StkBalaGen(FundamentalBase):
cash = Column(Numeric(18, 2))
financial_asset_held_for_trading = Column(Numeric(18, 2))
cash_equivalent = Column(Numeric(18, 2))
current_investment = Column(Numeric(18, 2))
current_investment_reserve = Column(Numeric(18, 2))
net_current_investment = Column(Numeric(18, 2))
bill_receivable = Column(Numeric(18, 2))
devidend_receivable = Column(Numeric(18, 2))
interest_receivable = Column(Numeric(18, 2))
accts_receivable = Column(Numeric(18, 2))
other_accts_receivable = Column(Numeric(18, 2))
bad_debt_reserve = Column(Numeric(18, 2))
net_accts_receivable = Column(Numeric(18, 2))
other_receivables = Column(Numeric(18, 2))
prepayment = Column(Numeric(18, 2))
subsidy_receivable = Column(Numeric(18, 2))
prepaid_tax = Column(Numeric(18, 2))
inventory = Column(Numeric(18, 2))
inventory_depreciation_reserve = Column(Numeric(18, 2))
net_inventory = Column(Numeric(18, 2))
deferred_expense = Column(Numeric(18, 2))
contract_work = Column(Numeric(18, 2))
long_term_debt_due_one_year = Column(Numeric(18, 2))
non_current_debt_due_one_year = Column(Numeric(18, 2))
other_current_assets = Column(Numeric(18, 2))
current_assets = Column(Numeric(18, 2))
financial_asset_available_for_sale = Column(Numeric(18, 2))
financial_asset_hold_to_maturity = Column(Numeric(18, 2))
real_estate_investment = Column(Numeric(18, 2))
long_term_equity_investment = Column(Numeric(18, 2))
long_term_receivables = Column(Numeric(18, 2))
long_term_debt_investment = Column(Numeric(18, 2))
other_long_term_investment = Column(Numeric(18, 2))
long_term_investment = Column(Numeric(18, 2))
provision_long_term_investment = Column(Numeric(18, 2))
net_long_term_equity_investment = Column(Numeric(18, 2))
net_long_term_debt_investment = Column(Numeric(18, 2))
net_long_term_investment = Column(Numeric(18, 2))
cost_fixed_assets = Column(Numeric(18, 2))
accumulated_depreciation = Column(Numeric(18, 2))
net_val_fixed_assets = Column(Numeric(18, 2))
depreciation_reserve = Column(Numeric(18, 2))
net_fixed_assets = Column(Numeric(18, 2))
engineer_material = Column(Numeric(18, 2))
construction_in_progress = Column(Numeric(18, 2))
fixed_asset_to_be_disposed = Column(Numeric(18, 2))
capitalized_biological_assets = Column(Numeric(18, 2))
oil_and_gas_assets = Column(Numeric(18, 2))
total_fixed_assets = Column(Numeric(18, 2))
intangible_assets = Column(Numeric(18, 2))
impairment_intangible_assets = Column(Numeric(18, 2))
goodwill = Column(Numeric(18, 2))
deferred_charges = Column(Numeric(18, 2))
long_term_deferred_expenses = Column(Numeric(18, 2))
other_long_term_assets = Column(Numeric(18, 2))
total_intangible_and_other_assets = Column(Numeric(18, 2))
deferred_income_tax_assets = Column(Numeric(18, 2))
other_non_current_assets = Column(Numeric(18, 2))
non_current_assets = Column(Numeric(18, 2))
total_assets = Column(Numeric(18, 2))
short_term_loans = Column(Numeric(18, 2))
financial_liabilities = Column(Numeric(18, 2))
notes_payable = Column(Numeric(18, 2))
accts_payable = Column(Numeric(18, 2))
advance_from_customers = Column(Numeric(18, 2))
proxy_sale_revenue = Column(Numeric(18, 2))
payroll_payable = Column(Numeric(18, 2))
walfare_payable = Column(Numeric(18, 2))
dividend_payable = Column(Numeric(18, 2))
tax_payable = Column(Numeric(18, 2))
interest_payable = Column(Numeric(18, 2))
other_fees_payable = Column(Numeric(18, 2))
internal_accts_payable = Column(Numeric(18, 2))
other_payable = Column(Numeric(18, 2))
short_term_debt = Column(Numeric(18, 2))
accrued_expense = Column(Numeric(18, 2))
estimated_liabilities = Column(Numeric(18, 2))
deferred_income = Column(Numeric(18, 2))
long_term_liabilities_due_one_year = Column(Numeric(18, 2))
other_current_liabilities = Column(Numeric(18, 2))
current_liabilities = Column(Numeric(18, 2))
long_term_loans = Column(Numeric(18, 2))
bond_payable = Column(Numeric(18, 2))
long_term_payable = Column(Numeric(18, 2))
grants_received = Column(Numeric(18, 2))
housing_revolving_funds = Column(Numeric(18, 2))
other_long_term_liabilities = Column(Numeric(18, 2))
long_term_liabilities = Column(Numeric(18, 2))
deferred_income_tax_liabilities = Column(Numeric(18, 2))
other_non_current_liabilities = Column(Numeric(18, 2))
non_current_liabilities = Column(Numeric(18, 2))
total_liabilities = Column(Numeric(18, 2))
paid_in_capital = Column(Numeric(18, 2))
invesment_refund = Column(Numeric(18, 2))
capital_reserve = Column(Numeric(18, 2))
surplus_reserve = Column(Numeric(18, 2))
statutory_reserve = Column(Numeric(18, 2))
unrealised_investment_loss = Column(Numeric(18, 2))
undistributed_profit = Column(Numeric(18, 2))
equity_parent_company = Column(Numeric(18, 2))
total_equity = Column(Numeric(18, 2))
minority_interest = Column(Numeric(18, 2))
total_equity_and_liabilities = Column(Numeric(18, 2))
provision = Column(Numeric(18, 2))
deferred_revenue = Column(Numeric(18, 2))
non_current_liability_due_one_year = Column(Numeric(18, 2))
liability_prefer_stock = Column(Numeric(18, 2))
equity_prefer_stock = Column(Numeric(18, 2))
equity_preferred_stock = Column(Numeric(18, 2))
unrealised_investment_losses = Column(Numeric(19, 4))
income_tax_refund = Column(Numeric(19, 4))
non_current_asset_due_one_year = Column(Numeric(19, 4))
long_term_deferred_income = Column(Numeric(19, 4))
dividend_receivable = Column(Numeric(19, 4))
| 47.658537
| 63
| 0.723303
|
c874b7005c5e7428d6b8beb0cf111141c0c4f533
| 231
|
py
|
Python
|
sql-alchemy/sql_alchemy/models.py
|
ChristopherNothmann-lab/SqlAlchemy
|
ac9d0e86335a41a88981742cac1f0fbf089be032
|
[
"MIT"
] | null | null | null |
sql-alchemy/sql_alchemy/models.py
|
ChristopherNothmann-lab/SqlAlchemy
|
ac9d0e86335a41a88981742cac1f0fbf089be032
|
[
"MIT"
] | null | null | null |
sql-alchemy/sql_alchemy/models.py
|
ChristopherNothmann-lab/SqlAlchemy
|
ac9d0e86335a41a88981742cac1f0fbf089be032
|
[
"MIT"
] | null | null | null |
from sql_alchemy.database import Database
db = Database().get_db()
class Student(db.Model):
__table_name__ = 'student'
id = db.Column('id', db.Integer, primary_key=True)
full_name = db.Column('full_name', db.Unicode)
| 25.666667
| 54
| 0.709957
|
41c807b482df716f38993d239fca06e8ff3a70fb
| 1,850
|
py
|
Python
|
Microsoft.ML.TensorFlow.TestModels/model_types_test/type_test.py
|
terrajobst/machinelearning-testdata
|
296625f4e49d50fcd6a48a0d92bea7584e198c0f
|
[
"MIT"
] | 6
|
2019-03-02T18:54:43.000Z
|
2021-12-28T13:23:25.000Z
|
Microsoft.ML.TensorFlow.TestModels/model_types_test/type_test.py
|
terrajobst/machinelearning-testdata
|
296625f4e49d50fcd6a48a0d92bea7584e198c0f
|
[
"MIT"
] | 7
|
2018-08-28T22:28:19.000Z
|
2022-03-14T19:53:27.000Z
|
Microsoft.ML.TensorFlow.TestModels/model_types_test/type_test.py
|
terrajobst/machinelearning-testdata
|
296625f4e49d50fcd6a48a0d92bea7584e198c0f
|
[
"MIT"
] | 12
|
2018-08-28T21:25:42.000Z
|
2022-02-27T17:06:46.000Z
|
import tensorflow as tf
f64 = tf.placeholder(dtype=tf.float64, shape=[None,2], name="f64")
f32 = tf.placeholder(dtype=tf.float32, shape=[None,2], name="f32")
i64 = tf.placeholder(dtype=tf.int64, shape=[None,2], name="i64")
i32 = tf.placeholder(dtype=tf.int32, shape=[None,2], name="i32")
i16 = tf.placeholder(dtype=tf.int16, shape=[None,2], name="i16")
i8 = tf.placeholder(dtype=tf.int8, shape=[None,2], name="i8")
u64 = tf.placeholder(dtype=tf.uint64, shape=[None,2], name="u64")
u32 = tf.placeholder(dtype=tf.uint32, shape=[None,2], name="u32")
u16 = tf.placeholder(dtype=tf.uint16, shape=[None,2], name="u16")
u8 = tf.placeholder(dtype=tf.uint8, shape=[None,2], name="u8")
b = tf.placeholder(dtype=tf.bool, shape=[None,2], name="b")
inputs = {'f64':f64, 'f32':f32,
          'i64':i64, 'i32':i32, 'i16':i16, 'i8':i8,
          'u64':u64, 'u32':u32, 'u16':u16, 'u8':u8,
          'b': b}
o_f64 = tf.identity(f64, name="o_f64")
o_f32 = tf.identity(f32, name="o_f32")
o_i64 = tf.identity(i64, name="o_i64")
o_i32 = tf.identity(i32, name="o_i32")
o_i16 = tf.identity(i16, name="o_i16")
o_i8 = tf.identity(i8, name="o_i8")
o_u64 = tf.identity(u64, name="o_u64")
o_u32 = tf.identity(u32, name="o_u32")
o_u16 = tf.identity(u16, name="o_u16")
o_u8 = tf.identity(u8, name="o_u8")
o_b = tf.identity(b, name="o_b")
outputs = {'o_f64':o_f64, 'o_f32':o_f32,
           'o_i64':o_i64, 'o_i32':o_i32, 'o_i16':o_i16, 'o_i8':o_i8,
           'o_u64':o_u64, 'o_u32':o_u32, 'o_u16':o_u16, 'o_u8':o_u8,
           'o_b': o_b}
# Add an op to initialize the variables.
init_op = tf.global_variables_initializer()
# Later, launch the model, initialize the variables, do some work, and save the
# variables to disk.
with tf.Session() as sess:
sess.run(init_op)
tf.saved_model.simple_save(sess, r'model_types_test', inputs=inputs, outputs=outputs )
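# Hedged loading sketch (TF 1.x API, matching the simple_save call above); the
# export directory is the one used when saving, the fed values are illustrative.
def _example_load_saved_model(export_dir='model_types_test'):
    with tf.Session(graph=tf.Graph()) as sess:
        tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], export_dir)
        # Placeholders and identity ops can be looked up by name once loaded.
        return sess.run('o_i32:0', feed_dict={'i32:0': [[1, 2]]})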
| 39.361702
| 88
| 0.658378
|
d8a4e0eaa50ce42d4a14b9ba2c949b54e41f1cbb
| 4,441
|
py
|
Python
|
Deep_Q_Learning/gridworld.py
|
leonardoaraujosantos/Learn_RL
|
da87bfd1346f9b7795df1197458b802cbc03c52d
|
[
"MIT"
] | 1
|
2018-05-25T21:51:57.000Z
|
2018-05-25T21:51:57.000Z
|
Deep_Q_Learning/gridworld.py
|
leonardoaraujosantos/Learn_RL
|
da87bfd1346f9b7795df1197458b802cbc03c52d
|
[
"MIT"
] | null | null | null |
Deep_Q_Learning/gridworld.py
|
leonardoaraujosantos/Learn_RL
|
da87bfd1346f9b7795df1197458b802cbc03c52d
|
[
"MIT"
] | null | null | null |
import numpy as np
import random
import itertools
import scipy.misc
import matplotlib.pyplot as plt
class gameOb():
def __init__(self, coordinates, size, intensity, channel, reward, name):
self.x = coordinates[0]
self.y = coordinates[1]
self.size = size
self.intensity = intensity
self.channel = channel
self.reward = reward
self.name = name
class gameEnv():
def __init__(self, partial, size):
self.sizeX = size
self.sizeY = size
self.actions = 4
self.objects = []
self.partial = partial
a = self.reset()
plt.imshow(a, interpolation="nearest")
def reset(self):
self.objects = []
hero = gameOb(self.newPosition(), 1, 1, 2, None, 'hero')
self.objects.append(hero)
bug = gameOb(self.newPosition(), 1, 1, 1, 1, 'goal')
self.objects.append(bug)
hole = gameOb(self.newPosition(), 1, 1, 0, -1, 'fire')
self.objects.append(hole)
bug2 = gameOb(self.newPosition(), 1, 1, 1, 1, 'goal')
self.objects.append(bug2)
hole2 = gameOb(self.newPosition(), 1, 1, 0, -1, 'fire')
self.objects.append(hole2)
bug3 = gameOb(self.newPosition(), 1, 1, 1, 1, 'goal')
self.objects.append(bug3)
bug4 = gameOb(self.newPosition(), 1, 1, 1, 1, 'goal')
self.objects.append(bug4)
state = self.renderEnv()
self.state = state
return state
def moveChar(self, direction):
# 0 - up, 1 - down, 2 - left, 3 - right
hero = self.objects[0]
heroX = hero.x
heroY = hero.y
penalize = 0.
if direction == 0 and hero.y >= 1:
hero.y -= 1
if direction == 1 and hero.y <= self.sizeY - 2:
hero.y += 1
if direction == 2 and hero.x >= 1:
hero.x -= 1
if direction == 3 and hero.x <= self.sizeX - 2:
hero.x += 1
if hero.x == heroX and hero.y == heroY:
penalize = 0.0
self.objects[0] = hero
return penalize
def newPosition(self):
iterables = [range(self.sizeX), range(self.sizeY)]
points = []
for t in itertools.product(*iterables):
points.append(t)
currentPositions = []
for objectA in self.objects:
if (objectA.x, objectA.y) not in currentPositions:
currentPositions.append((objectA.x, objectA.y))
for pos in currentPositions:
points.remove(pos)
location = np.random.choice(range(len(points)), replace=False)
return points[location]
def checkGoal(self):
others = []
for obj in self.objects:
if obj.name == 'hero':
hero = obj
else:
others.append(obj)
ended = False
for other in others:
if hero.x == other.x and hero.y == other.y:
self.objects.remove(other)
if other.reward == 1:
self.objects.append(gameOb(self.newPosition(), 1, 1, 1, 1, 'goal'))
else:
self.objects.append(gameOb(self.newPosition(), 1, 1, 0, -1, 'fire'))
return other.reward, False
if ended == False:
return 0.0, False
def renderEnv(self):
# a = np.zeros([self.sizeY,self.sizeX,3])
a = np.ones([self.sizeY + 2, self.sizeX + 2, 3])
a[1:-1, 1:-1, :] = 0
hero = None
for item in self.objects:
a[item.y + 1:item.y + item.size + 1, item.x + 1:item.x + item.size + 1, item.channel] = item.intensity
if item.name == 'hero':
hero = item
if self.partial == True:
a = a[hero.y:hero.y + 3, hero.x:hero.x + 3, :]
b = scipy.misc.imresize(a[:, :, 0], [84, 84, 1], interp='nearest')
c = scipy.misc.imresize(a[:, :, 1], [84, 84, 1], interp='nearest')
d = scipy.misc.imresize(a[:, :, 2], [84, 84, 1], interp='nearest')
a = np.stack([b, c, d], axis=2)
return a
def step(self, action):
penalty = self.moveChar(action)
reward, done = self.checkGoal()
state = self.renderEnv()
if reward == None:
print(done)
print(reward)
print(penalty)
return state, (reward + penalty), done
else:
return state, (reward + penalty), done
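# Hedged usage sketch: run a few random actions in a small non-partial grid.
# The grid size and number of steps are illustrative choices.
def _example_episode(steps=10):
    env = gameEnv(partial=False, size=5)
    env.reset()
    total_reward = 0.0
    for _ in range(steps):
        action = np.random.randint(env.actions)  # 0 up, 1 down, 2 left, 3 right
        state, reward, done = env.step(action)
        total_reward += reward
    return total_reward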
| 34.695313
| 114
| 0.525332
|
67374ee28806cf7ee7a8ab8b0ec7a3d1e36c3f38
| 73
|
py
|
Python
|
26.py
|
fptitsyn/ben
|
460e6a32897076913a43160261af2dd3cca851c8
|
[
"MIT"
] | null | null | null |
26.py
|
fptitsyn/ben
|
460e6a32897076913a43160261af2dd3cca851c8
|
[
"MIT"
] | null | null | null |
26.py
|
fptitsyn/ben
|
460e6a32897076913a43160261af2dd3cca851c8
|
[
"MIT"
] | null | null | null |
f = 5
g = 21 - f
t = g // 3
f = (2 * f) % t
g = g + f + t
print(g)
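# Worked trace of the assignments above:
#   f = 5
#   g = 21 - 5      = 16
#   t = 16 // 3     = 5
#   f = (2 * 5) % 5 = 0
#   g = 16 + 0 + 5  = 21   -> the program prints 21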
| 10.428571
| 16
| 0.315068
|
977fc0bdab21302c0d9717d120d924e3fce57920
| 2,556
|
py
|
Python
|
cistrome_processing/motif_pipeline/bin/add_motifs_to_h5.py
|
Wang-Cankun/lisa2
|
2407cc3c12f43bf41f0e14b2a8a5fcdfe07ff310
|
[
"MIT"
] | 17
|
2020-09-21T20:04:43.000Z
|
2022-01-15T11:25:41.000Z
|
cistrome_processing/motif_pipeline/bin/add_motifs_to_h5.py
|
Wang-Cankun/lisa2
|
2407cc3c12f43bf41f0e14b2a8a5fcdfe07ff310
|
[
"MIT"
] | 1
|
2021-10-04T22:39:05.000Z
|
2021-10-04T22:39:05.000Z
|
cistrome_processing/motif_pipeline/bin/add_motifs_to_h5.py
|
Wang-Cankun/lisa2
|
2407cc3c12f43bf41f0e14b2a8a5fcdfe07ff310
|
[
"MIT"
] | 5
|
2021-02-16T13:16:34.000Z
|
2022-03-08T16:15:25.000Z
|
from lisa.core.data_interface import DataInterface
import argparse
import os
import numpy as np
import pandas as pd
from scipy import sparse
TECHNOLOGY = 'Motifs'
def main(species, window_size, motif_metadata, bin_sorted_hits, group_loci = 100000):
motif_metadata = pd.read_csv(motif_metadata, sep = '\t', header = None)
motif_metadata.columns = ['dataset_id', 'factor', 'source']
motif_metadata = motif_metadata.set_index('dataset_id')
motif_metadata = motif_metadata.drop_duplicates()
data = DataInterface(species, window_size= window_size, download_if_not_exists=False,
make_new=False, load_genes=False)
data.create_binding_dataset(TECHNOLOGY, motif_metadata.index.values, **motif_metadata.to_dict('list'))
id_to_idx_map = dict(zip(data.list_binding_datasets(TECHNOLOGY), np.arange(len(data.list_binding_datasets(TECHNOLOGY)))))
current_pos = 0
last_added_chunk = 0
i = 0
rows,cols,scores=[],[],[]
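    # Stream the bin-sorted hits file, buffering (bin, motif, score) triplets and
    # flushing them to the dataset as CSR chunks once at least `group_loci` hits
    # are buffered and a new bin starts, so the full matrix is never held in memory.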
with open(bin_sorted_hits, 'r') as f:
for line in f:
motif_id, bin_num, score = line.strip().split()
bin_num = int(bin_num)
if bin_num < current_pos:
raise Exception('Input file not sorted!')
elif bin_num > current_pos and i >= group_loci:
print('Adding matrix segment ...')
matrix_form = sparse.coo_matrix((scores, (rows, cols))).tocsr()
data.append_csr(TECHNOLOGY, matrix_form)
last_added_chunk = bin_num
i=0
rows,cols,scores=[],[],[]
tf_idx = id_to_idx_map[motif_id]
rows.append(bin_num - last_added_chunk)
cols.append(tf_idx)
scores.append(int(score))
current_pos = bin_num
i+=1
if len(rows) > 0:
matrix_form = sparse.coo_matrix((scores, (rows, cols))).tocsr()
data.append_csr(TECHNOLOGY, matrix_form)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Saves indices lists to factor binding h5. Filename specifies metadata: {technology}-{dataset_id}-{metadata1_metadata2_...}')
parser.add_argument('species', type = str, choices = ['hg38','mm10'])
parser.add_argument('window_size', type = int)
parser.add_argument('motif_metadata', type = str)
parser.add_argument('hits', type = str)
args = parser.parse_args()
main(args.species, int(args.window_size), args.motif_metadata, args.hits)
| 36
| 180
| 0.650626
|
d88413f70c9b7ee39e0a2e33ad9f44fb4ba3acee
| 525
|
py
|
Python
|
account/forms.py
|
alexalvarex/CentroPlanificacion
|
b1ec85a5bbdfcf91a2cc9afbb0f5b213d4e09169
|
[
"Apache-2.0"
] | null | null | null |
account/forms.py
|
alexalvarex/CentroPlanificacion
|
b1ec85a5bbdfcf91a2cc9afbb0f5b213d4e09169
|
[
"Apache-2.0"
] | null | null | null |
account/forms.py
|
alexalvarex/CentroPlanificacion
|
b1ec85a5bbdfcf91a2cc9afbb0f5b213d4e09169
|
[
"Apache-2.0"
] | null | null | null |
#coding: utf8
from django.contrib.auth.forms import AuthenticationForm
class MyAuthenticationForm(AuthenticationForm):
def __init__(self, *args, **kwargs):
super(MyAuthenticationForm, self).__init__(*args, **kwargs)
        # Style the per-instance fields; base_fields is shared across all instances.
        self.fields['username'].widget.attrs['class'] = 'form-control mr-sm-2'
        self.fields['password'].widget.attrs['class'] = 'form-control mr-sm-2'
        self.fields['username'].widget.attrs['placeholder'] = 'Username'
        self.fields['password'].widget.attrs['placeholder'] = 'Password'
| 37.5
| 78
| 0.744762
|
6253ba916d662ef96b7960af245f7602536cc918
| 2,158
|
py
|
Python
|
tests/buckets/test_bucket_authorization.py
|
kids-first/kf-api-study-creator
|
93a79b108b6474f9b4135ace06c89ddcf63dd257
|
[
"Apache-2.0"
] | 3
|
2019-05-04T02:07:28.000Z
|
2020-10-16T17:47:44.000Z
|
tests/buckets/test_bucket_authorization.py
|
kids-first/kf-api-study-creator
|
93a79b108b6474f9b4135ace06c89ddcf63dd257
|
[
"Apache-2.0"
] | 604
|
2019-02-21T18:14:51.000Z
|
2022-02-10T08:13:54.000Z
|
tests/buckets/test_bucket_authorization.py
|
kids-first/kf-api-study-creator
|
93a79b108b6474f9b4135ace06c89ddcf63dd257
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from graphql_relay import to_global_id
from creator.buckets.factories import BucketFactory
@pytest.mark.parametrize("resource", ["bucket"])
@pytest.mark.parametrize(
"user_group,allowed",
[
("Administrators", True),
("Services", False),
("Developers", False),
("Investigators", False),
("Bioinformatics", False),
(None, False),
],
)
def test_get_node_by_id(db, clients, prep_file, resource, user_group, allowed):
"""
Test that resource may or may not be retrieved by (relay) id
"""
# Select client based on user type
client = clients.get(user_group)
bucket = BucketFactory()
node_id = to_global_id("BucketNode", bucket.name)
# Now try to get node by the relay id
query = f'{{{resource}(id: "{node_id}") {{ id }} }}'
resp = client.post(
"/graphql", data={"query": query}, content_type="application/json"
)
    # Should get back the node if allowed, a "Not allowed" error otherwise
if allowed:
assert resp.json()["data"][resource]["id"] == node_id
else:
assert resp.json()["errors"][0]["message"] == "Not allowed"
@pytest.mark.parametrize("resource", ["allBuckets"])
@pytest.mark.parametrize(
"user_group,allowed",
[
("Administrators", True),
("Services", False),
("Developers", False),
("Investigators", False),
("Bioinformatics", False),
(None, False),
],
)
def test_get_all(db, clients, prep_file, resource, user_group, allowed):
"""
    Test that all resources may or may not be listed, depending on the user's group
"""
# Select client based on user type
client = clients.get(user_group)
bucket = BucketFactory()
    # Now try to list all nodes of this type
query = f"{{{resource} {{ edges {{ node {{ id }} }} }} }}"
resp = client.post(
"/graphql", data={"query": query}, content_type="application/json"
)
    # Should get back the list of edges if allowed, a "Not allowed" error otherwise
if allowed:
assert len(resp.json()["data"][resource]["edges"]) == 1
else:
assert resp.json()["errors"][0]["message"] == "Not allowed"
| 29.561644
| 79
| 0.610287
|
e6157612d3ddcf5e709c92316be5666e13bad4af
| 342
|
py
|
Python
|
urbarium/title/migrations/0007_auto_20200910_1752.py
|
nikerzetic/zacasno-ime
|
7ca42665a0f64cae7233c994c879f2d81502efec
|
[
"MIT"
] | null | null | null |
urbarium/title/migrations/0007_auto_20200910_1752.py
|
nikerzetic/zacasno-ime
|
7ca42665a0f64cae7233c994c879f2d81502efec
|
[
"MIT"
] | 5
|
2021-03-30T14:18:31.000Z
|
2021-09-22T19:35:18.000Z
|
urbarium/title/migrations/0007_auto_20200910_1752.py
|
nikerzetic/zacasno-ime
|
7ca42665a0f64cae7233c994c879f2d81502efec
|
[
"MIT"
] | 1
|
2020-09-08T10:38:54.000Z
|
2020-09-08T10:38:54.000Z
|
# Generated by Django 3.0.7 on 2020-09-10 15:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('title', '0006_auto_20200713_1412'),
]
operations = [
migrations.AlterModelOptions(
name='title',
options={'ordering': ['name']},
),
]
| 19
| 47
| 0.584795
|
dc50d34c7bbe0c67f0fd60cc52ca656cc9aef838
| 2,084
|
py
|
Python
|
proj/config.py
|
SD2E/uploads-agent
|
a26885f82c30f14b742396646e1d38eba345b259
|
[
"BSD-3-Clause"
] | null | null | null |
proj/config.py
|
SD2E/uploads-agent
|
a26885f82c30f14b742396646e1d38eba345b259
|
[
"BSD-3-Clause"
] | null | null | null |
proj/config.py
|
SD2E/uploads-agent
|
a26885f82c30f14b742396646e1d38eba345b259
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Configuration loader from YML files with
# optional ENV-based overrides
# Usage: from config import settings
import os
import yaml
settings = {}
HERE = os.path.dirname(os.path.abspath(__file__))
PWD = os.getcwd()
ROOT = '/'
'''
Implements some configuration conventions:
1. A Reactor can be configured via a config.yml
file in its top-level directory. The configuration
file can have arbitrary content so long as it can
be resolved from YML to JSON.
2. Any first- or second-level key with a string
value can be overridden at run-time by an env
named _REACTOR_LEVEL1_LEVEL2
3. The configuration is exported as a dict in
the 'settings' variable
'''
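# Example of convention 2 with hypothetical keys: given a config.yml containing
#
#   logs:
#     level: INFO
#
# read_config() yields settings == {'logs': {'level': 'INFO'}}, and exporting
# _REACTOR_LOGS_LEVEL=DEBUG before start-up overrides that value to 'DEBUG'.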
def read_config():
"""Reads config.yml into 'settings' with optional ENV overrides"""
config = settings.copy()
# File-based configuration.
places = [ROOT, PWD, HERE]
for p in places:
fname = os.path.join(p, "config.yml")
if os.path.isfile(fname):
try:
with open(fname, "r") as conf:
read_config = yaml.load(conf, Loader=yaml.FullLoader)
config = config.copy()
config.update(read_config)
break
except Exception:
pass
# TODO - Check for duplicate keys coming in from ENV
# TODO - Check that get/set from ENV is successful
for level1 in config.keys():
if type(config[level1]) is str:
env_var = '_REACTOR_' + level1
env_var = env_var.upper()
if os.environ.get(env_var):
config[level1] = os.environ.get(env_var)
elif type(config[level1]) is dict:
for level2 in config[level1].keys():
if type(config[level1][level2]) is str:
env_var = '_REACTOR_' + level1 + '_' + level2
env_var = env_var.upper()
if os.environ.get(env_var):
config[level1][level2] = os.environ.get(env_var)
else:
pass
return config
settings = read_config()
| 30.202899
| 73
| 0.607006
|
bb24d9d052b41dc5e6d3d0e94a9dcf94c8a367dd
| 5,624
|
py
|
Python
|
pglet/linechart.py
|
MrHemlock/pglet-python
|
0d8dd2bb07effdd1a09836641ffa9c7e4271b907
|
[
"MIT"
] | null | null | null |
pglet/linechart.py
|
MrHemlock/pglet-python
|
0d8dd2bb07effdd1a09836641ffa9c7e4271b907
|
[
"MIT"
] | 4
|
2022-01-14T19:30:49.000Z
|
2022-01-19T15:59:03.000Z
|
pglet/linechart.py
|
MrHemlock/pglet-python
|
0d8dd2bb07effdd1a09836641ffa9c7e4271b907
|
[
"MIT"
] | null | null | null |
from typing import Literal, Optional, Union
from beartype import beartype
from pglet.control import Control
X_TYPE = Literal[None, "number", "date"]
class LineChart(Control):
def __init__(
self,
id=None,
legend=None,
tooltips=None,
stroke_width=None,
y_min=None,
y_max=None,
y_ticks=None,
y_format=None,
x_type: X_TYPE = None,
lines=None,
width=None,
height=None,
padding=None,
margin=None,
visible=None,
disabled=None,
):
Control.__init__(
self,
id=id,
width=width,
height=height,
padding=padding,
margin=margin,
visible=visible,
disabled=disabled,
)
self.__lines = []
        if lines is not None:
for line in lines:
self.__lines.append(line)
self.legend = legend
self.tooltips = tooltips
self.stroke_width = stroke_width
self.y_min = y_min
self.y_max = y_max
self.y_ticks = y_ticks
self.y_format = y_format
self.x_type = x_type
def _get_control_name(self):
return "linechart"
# lines
@property
def lines(self):
return self.__lines
@lines.setter
def lines(self, value):
self.__lines = value
# legend
@property
def legend(self):
return self._get_attr("legend")
@legend.setter
@beartype
def legend(self, value: Optional[bool]):
self._set_attr("legend", value)
# tooltips
@property
def tooltips(self):
return self._get_attr("tooltips")
@tooltips.setter
@beartype
def tooltips(self, value: Optional[bool]):
self._set_attr("tooltips", value)
# stroke_width
@property
def stroke_width(self):
return self._get_attr("strokeWidth")
@stroke_width.setter
@beartype
def stroke_width(self, value: Optional[int]):
self._set_attr("strokeWidth", value)
# y_min
@property
def y_min(self):
return self._get_attr("yMin")
@y_min.setter
@beartype
def y_min(self, value: Union[None, int, float]):
self._set_attr("yMin", value)
# y_max
@property
def y_max(self):
return self._get_attr("yMax")
@y_max.setter
@beartype
def y_max(self, value: Union[None, int, float]):
self._set_attr("yMax", value)
# y_ticks
@property
def y_ticks(self):
return self._get_attr("yTicks")
@y_ticks.setter
@beartype
def y_ticks(self, value: Optional[int]):
self._set_attr("yTicks", value)
# y_format
@property
def y_format(self):
return self._get_attr("yFormat")
@y_format.setter
def y_format(self, value):
self._set_attr("yFormat", value)
# x_type
@property
def x_type(self):
return self._get_attr("xType")
@x_type.setter
@beartype
def x_type(self, value: X_TYPE):
self._set_attr("xType", value)
def _get_children(self):
return self.__lines
class Data(Control):
def __init__(self, id=None, color=None, legend=None, points=None):
Control.__init__(self, id=id)
self.color = color
self.legend = legend
self.__points = []
        if points is not None:
for point in points:
self.__points.append(point)
# color
@property
def color(self):
return self._get_attr("color")
@color.setter
def color(self, value):
self._set_attr("color", value)
# legend
@property
def legend(self):
return self._get_attr("legend")
@legend.setter
def legend(self, value):
self._set_attr("legend", value)
# points
@property
def points(self):
return self.__points
@points.setter
def points(self, value):
self.__points = value
def _get_control_name(self):
return "data"
def _get_children(self):
return self.__points
class Point(Control):
def __init__(
self,
id=None,
x=None,
y=None,
tick=None,
legend=None,
x_tooltip=None,
y_tooltip=None,
):
Control.__init__(self, id=id)
self.x = x
self.y = y
self.tick = tick
self.legend = legend
self.x_tooltip = x_tooltip
self.y_tooltip = y_tooltip
def _get_control_name(self):
return "p"
# x
@property
def x(self):
return self._get_attr("x")
@x.setter
def x(self, value):
self._set_attr("x", value)
# y
@property
def y(self):
return self._get_attr("y")
@y.setter
@beartype
def y(self, value: Union[None, int, float]):
self._set_attr("y", value)
# tick
@property
def tick(self):
return self._get_attr("tick")
@tick.setter
def tick(self, value):
self._set_attr("tick", value)
# legend
@property
def legend(self):
return self._get_attr("legend")
@legend.setter
def legend(self, value):
self._set_attr("legend", value)
# x_tooltip
@property
def x_tooltip(self):
return self._get_attr("xTooltip")
@x_tooltip.setter
def x_tooltip(self, value):
self._set_attr("xTooltip", value)
# y_tooltip
@property
def y_tooltip(self):
return self._get_attr("yTooltip")
@y_tooltip.setter
def y_tooltip(self, value):
self._set_attr("yTooltip", value)
| 20.752768
| 70
| 0.573969
|
2abdf17f5ba0153d902622d4b340aca21b266023
| 9,467
|
py
|
Python
|
test_tools/onnx_pytorch/common/parser.py
|
cirvine-MSFT/reinforcement_learning
|
c006b21d0a027b78d9285bf2597b503669bac82c
|
[
"MIT"
] | 63
|
2018-10-22T17:11:02.000Z
|
2021-12-08T17:26:41.000Z
|
test_tools/onnx_pytorch/common/parser.py
|
cirvine-MSFT/reinforcement_learning
|
c006b21d0a027b78d9285bf2597b503669bac82c
|
[
"MIT"
] | 160
|
2018-10-09T02:34:57.000Z
|
2022-03-31T15:43:48.000Z
|
test_tools/onnx_pytorch/common/parser.py
|
cirvine-MSFT/reinforcement_learning
|
c006b21d0a027b78d9285bf2597b503669bac82c
|
[
"MIT"
] | 36
|
2018-10-08T21:44:05.000Z
|
2022-03-22T16:20:03.000Z
|
import struct
import flatbuffers
import zstd
from reinforcement_learning.messages.flatbuff.v2.EventBatch import EventBatch
from reinforcement_learning.messages.flatbuff.v2.Event import Event
from reinforcement_learning.messages.flatbuff.v2.EventEncoding import EventEncoding
from reinforcement_learning.messages.flatbuff.v2.LearningModeType import LearningModeType
from reinforcement_learning.messages.flatbuff.v2.PayloadType import PayloadType
from reinforcement_learning.messages.flatbuff.v2.OutcomeValue import OutcomeValue
from reinforcement_learning.messages.flatbuff.v2.NumericOutcome import NumericOutcome
from reinforcement_learning.messages.flatbuff.v2.NumericIndex import NumericIndex
from reinforcement_learning.messages.flatbuff.v2.CbEvent import CbEvent
from reinforcement_learning.messages.flatbuff.v2.OutcomeEvent import OutcomeEvent
from reinforcement_learning.messages.flatbuff.v2.MultiSlotEvent import MultiSlotEvent
from reinforcement_learning.messages.flatbuff.v2.CaEvent import CaEvent
from reinforcement_learning.messages.flatbuff.v2.DedupInfo import DedupInfo
from reinforcement_learning.messages.flatbuff.v2.MultiStepEvent import MultiStepEvent
from reinforcement_learning.messages.flatbuff.v2.FileHeader import *
from reinforcement_learning.messages.flatbuff.v2.JoinedEvent import *
from reinforcement_learning.messages.flatbuff.v2.JoinedPayload import *
from reinforcement_learning.messages.flatbuff.v2.CheckpointInfo import *
from reinforcement_learning.messages.flatbuff.v2.RewardFunctionType import *
class Base64Tensor:
@staticmethod
def parse(line):
import numpy as np
import base64
prefix, value = line.split(';')
#many hacks
shape = struct.unpack('4Q', base64.b64decode(prefix))
shape = shape[1:]
return np.array(struct.unpack('%df' % np.prod(shape), base64.b64decode(value))).reshape(shape)
@staticmethod
def parse_dict(context):
return dict(map(lambda kv: (kv[0], Base64Tensor.parse(kv[1])), \
context.items()))
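# Serialized tensor format handled above: "<base64 prefix>;<base64 data>", where the
# prefix decodes to four uint64s (the first is dropped, the remaining three are the
# shape) and the data decodes to that many 32-bit floats reshaped to that shape.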
class CbDsjsonParser:
@staticmethod
def parse(line):
import json
obj = json.loads(line)
return {'features': Base64Tensor.parse_dict(obj['c']), 'label': obj['_labelIndex'], 'cost': obj['_label_cost']}
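# A dsjson line is expected to carry the shared context under 'c' (a dict of
# serialized tensors), the chosen action index under '_labelIndex', and the
# observed cost under '_label_cost'.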
class CbDictParser:
@staticmethod
def parse(obj):
import numpy as np
import json
# features is a json payload wrapped in a bytearray
features = json.loads(obj[0].decode('utf-8'))
label = obj[1]
# I think the logs provide 1-indexed values while the train function is expecting 0-indexed
label = label - 1
from torch import tensor
return {'features': Base64Tensor.parse_dict(features), 'label': label, 'cost': obj[2]}
MSG_TYPE_HEADER = 0x55555555
MSG_TYPE_CHECKPOINT = 0x11111111
MSG_TYPE_REGULAR = 0xFFFFFFFF
MSG_TYPE_EOF = 0xAAAAAAAA
# mostly ripped from the flatbuf parser
class JoinedLogStreamReader:
def __init__(self, buf):
self.buf = buf
self.offset = 0
self.headers = dict()
self.parse_header()
self.read_file_header()
def parse_header(self):
if self.buf[0:4] != b'VWFB':
raise Exception("Invalid file magic")
self.version = struct.unpack('I', self.buf[4:8])[0]
if self.version != 1:
            raise Exception(f'Unsupported file version {self.version}')
self.offset = 8
def read(self, size):
if size == 0:
return bytearray([])
data = self.buf[self.offset : self.offset + size]
self.offset += size
return data
def read_message(self):
kind = struct.unpack('I', self.read(4))[0]
length = struct.unpack('I', self.read(4))[0]
payload = self.read(length)
#discard padding
self.read(length % 8)
return (kind, payload)
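    # Framing: each message is a uint32 kind, a uint32 payload length, the payload
    # bytes, and then `length % 8` trailing bytes that are discarded as padding.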
def read_file_header(self):
msg = self.read_message()
if msg[0] != MSG_TYPE_HEADER:
            raise Exception(f'Missing file header, found message of type {msg[0]} instead')
header = FileHeader.GetRootAsFileHeader(msg[1], 0)
for i in range(header.PropertiesLength()):
p = header.Properties(i)
self.headers[p.Key().decode('utf-8')] = p.Value().decode('utf-8')
def checkpoint_info(self):
msg = self.read_message()
if msg[0] != MSG_TYPE_CHECKPOINT:
            raise Exception(f'Missing checkpoint info, found message type of {msg[0]} instead')
return CheckpointInfo.GetRootAsCheckpointInfo(msg[1], 0)
def messages(self):
while True:
msg = self.read_message()
if msg[0] == MSG_TYPE_EOF:
break
if msg[0] != MSG_TYPE_REGULAR:
                raise Exception(f'Expected Regular message type, found {msg[0]} instead')
yield JoinedPayload.GetRootAsJoinedPayload(msg[1], 0)
return None
# partially ripped from the flatbuf parser
class VWFlatbufferParser:
# data should be a bytearray
def __init__(self, data):
self.reader = JoinedLogStreamReader(data)
self.current_id = None
self.checkpoint_info = self.reader.checkpoint_info()
self.gen = self.read_event_series()
def __iter__(self):
return self.gen
def read_as_string(self, table):
off = table.Pos
length = flatbuffers.encode.Get(flatbuffers.number_types.UOffsetTFlags.packer_type, table.Bytes, off)
start = off + flatbuffers.number_types.UOffsetTFlags.bytewidth
return bytes(table.Bytes[start:start+length])
def cast(self, table, tmp_type):
tmp = tmp_type()
tmp.Init(table.Bytes, table.Pos)
return tmp
def parse_cb(self, payload):
evt = CbEvent.GetRootAsCbEvent(payload, 0)
payload = evt.ContextAsNumpy()
label = evt.ActionIds(0)
return bytearray(payload), label
def parse_outcome(self, payload):
evt = OutcomeEvent.GetRootAsOutcomeEvent(payload, 0)
value = evt.Value()
if evt.ValueType() == OutcomeValue.literal:
value = self.read_as_string(value)
elif evt.ValueType() == OutcomeValue.numeric:
value = self.cast(value, NumericOutcome).Value()
index = None
if evt.ActionTaken() is True:
index = evt.Index()
if evt.IndexType() == OutcomeValue.literal:
# parse_cb only contains numeric index types, so this should also be a numeric index type
raise Exception("literal index types are unsupported")
elif evt.IndexType() == OutcomeValue.numeric:
index = self.cast(index, NumericIndex).Index()
return value, index
def apply_reward_fn(self, rewards):
reward_fn_type = self.checkpoint_info.RewardFunctionType()
if len(rewards) == 0:
return self.checkpoint_info.DefaultReward()
elif reward_fn_type == RewardFunctionType.Earliest:
return rewards[0]
elif reward_fn_type == RewardFunctionType.Average:
return sum(rewards)/len(rewards)
elif reward_fn_type == RewardFunctionType.Median:
from statistics import median
return median(rewards)
elif reward_fn_type == RewardFunctionType.Sum:
return sum(rewards)
elif reward_fn_type == RewardFunctionType.Min:
return min(rewards)
elif reward_fn_type == RewardFunctionType.Max:
return max(rewards)
else:
raise Exception("Unknown reward function type")
# reads all events associated with the next event_id
def read_event_series(self, timestamp=None):
        # Initialised here so the final accumulated series can be flushed after the loop.
        label, rewards, current_payload = None, [], None
        for msg in self.reader.messages():
# only support CB for now
label = None
rewards = []
current_id = None
current_payload = None
# One thing to note, the chosen action is either encoded in the ActionIds array
# in the CB payload as the first element (if DeferredAction == False)
# or its in the outcomes payload as the ActionTaken if DeferredAction == True
for i in range(msg.EventsLength()):
joined_event = msg.Events(i).EventAsNumpy()
evt = Event.GetRootAsEvent(joined_event, 0)
m = evt.Meta()
event_payload = evt.PayloadAsNumpy()
if m.Encoding() == EventEncoding.Zstd:
event_payload = zstd.decompress(event_payload)
if m.PayloadType() == PayloadType.CB:
# ew gross
if label is not None:
cost = -1*self.apply_reward_fn(rewards)
yield current_payload, label, cost
current_id = m.Id()
current_payload, label = self.parse_cb(event_payload)
elif m.PayloadType() == PayloadType.Outcome:
tmpreward, tmplabel = self.parse_outcome(event_payload)
if tmpreward is not None:
rewards.append(tmpreward)
if tmplabel is not None:
label = tmplabel
elif m.PayloadType() == PayloadType.DedupInfo:
raise Exception("Not Implemented")
continue
else:
raise Exception('unknown payload type')
        # Flush the last accumulated event series, if any.
        if current_payload is not None and label is not None:
            cost = -1 * self.apply_reward_fn(rewards)
            yield current_payload, label, cost
| 39.777311
| 119
| 0.64371
|
ded52f147325f3bb21f1ca4b8c36c6dbbd446ea8
| 3,418
|
py
|
Python
|
great_expectations/types/__init__.py
|
denimalpaca/great_expectations
|
0f28f3c2b3cc6fae3bc5d257c6d4d13dbcf37df0
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/types/__init__.py
|
denimalpaca/great_expectations
|
0f28f3c2b3cc6fae3bc5d257c6d4d13dbcf37df0
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/types/__init__.py
|
denimalpaca/great_expectations
|
0f28f3c2b3cc6fae3bc5d257c6d4d13dbcf37df0
|
[
"Apache-2.0"
] | null | null | null |
import copy
from enum import Enum
from .configurations import ClassConfig
class DictDot:
"""A convenience class for migrating away from untyped dictionaries to stronger typed objects.
Can be instantiated with arguments:
my_A = MyClassA(
foo="a string",
bar=1,
)
Can be instantiated from a dictionary:
my_A = MyClassA(
**{
"foo": "a string",
"bar": 1,
}
)
Can be accessed using both dictionary and dot notation
my_A.foo == "a string"
my_A.bar == 1
my_A["foo"] == "a string"
my_A["bar"] == 1
Pairs nicely with @dataclass:
@dataclass()
class MyClassA(DictDot):
foo: str
bar: int
Can be made immutable:
@dataclass(frozen=True)
class MyClassA(DictDot):
foo: str
bar: int
For more examples of usage, please see `test_dataclass_serializable_dot_dict_pattern.py` in the tests folder.
"""
def __getitem__(self, item):
if isinstance(item, int):
return list(self.__dict__.keys())[item]
return getattr(self, item)
def __setitem__(self, key, value):
setattr(self, key, value)
def __delitem__(self, key):
delattr(self, key)
def __contains__(self, key):
return hasattr(self, key)
def __len__(self):
return len(self.__dict__)
def keys(self):
return self.to_dict().keys()
def values(self):
return self.to_dict().values()
def items(self):
return self.to_dict().items()
def get(self, key, default_value=None):
if self.__contains__(key=key):
return self.__getitem__(item=key)
return self.__dict__.get(key, default_value)
def to_dict(self):
"""Convert this object into a standard dictionary, recursively.
This is often convenient for serialization, and in cases where an untyped version of the object is required.
"""
new_dict = copy.deepcopy(self.__dict__)
# This is needed to play nice with pydantic.
if "__initialised__" in new_dict:
del new_dict["__initialised__"]
# DictDot's to_dict method works recursively, when a DictDot contains other DictDots.
for key, value in new_dict.items():
# Recursive conversion works on keys that are DictDots...
if isinstance(value, DictDot):
new_dict[key] = value.to_dict()
# ...and Enums...
elif isinstance(value, Enum):
new_dict[key] = value.value
# ...and when DictDots and Enums are nested one layer deeper in lists or tuples
if isinstance(value, list) or isinstance(value, tuple):
new_dict[key] = [temp_element for temp_element in value]
for i, element in enumerate(value):
if isinstance(element, DictDot):
new_dict[key][i] = element.to_dict()
elif isinstance(element, Enum):
new_dict[key][i] = element.value
# Note: conversion will not work automatically if there are additional layers in between.
return new_dict
class SerializableDictDot(DictDot):
def to_json_dict(self) -> dict:
raise NotImplementedError
| 28.247934
| 116
| 0.587771
|
6077da341a91bf58961a82639c5582444b79e7ab
| 9,495
|
py
|
Python
|
heap/commands.py
|
dsanders11/gdb-heapanalyzer
|
f512f63387d589504e646dfcce56e5d220ce73d0
|
[
"MIT"
] | 1
|
2016-12-10T14:46:19.000Z
|
2016-12-10T14:46:19.000Z
|
heap/commands.py
|
dsanders11/gdb-heapanalyzer
|
f512f63387d589504e646dfcce56e5d220ce73d0
|
[
"MIT"
] | null | null | null |
heap/commands.py
|
dsanders11/gdb-heapanalyzer
|
f512f63387d589504e646dfcce56e5d220ce73d0
|
[
"MIT"
] | null | null | null |
"""Common GDB commands for heap analysis. Once a heap implementation is identified
further commands are registered.
"""
import _gdb as gdb
import functools
from _heap import UnsupportedHeap
# Always import any non-standard GDB helpers from _gdb
from _gdb import is_debuginfo_loaded, is_inferior_running, is_inferior_coredump
# XXX - GDB forces us to butcher our doc strings to make them show up right as
# help messages in GDB. They should be parsing the docstrings according to PEP 257
def requires_debuginfo(debuginfo):
"""Decorator for commands which require certain debug info to function
This decorator was inspired by the original one in gdb-heap, but improved
to add more versatility and functionality.
"""
assert not callable(debuginfo), \
"Programming error, you're probably using this decorator wrong, it requires an argument"
debuginfo_cache = {}
def decorator(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
debuginfo_found = debuginfo_cache.setdefault(debuginfo, is_debuginfo_loaded(debuginfo))
if not debuginfo_found:
print "Missing debuginfo for {0}".format(debuginfo)
print "Suggested fix:"
print " debuginfo-install {0}".format(debuginfo)
else:
func(*args, **kwargs)
return new_func
return decorator
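# Usage sketch (hypothetical command method; the package name is only an example):
#
#   @requires_debuginfo("glibc")
#   def invoke(self, args, from_tty):
#       ...  # body only runs when glibc debuginfo is loaded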
def requires_running_or_core(msg="Inferior must be a running process or core dump"):
"""Decorator for commands which need a running program, or a core dump"""
assert not callable(msg), \
"Programming error, you're probably using this decorator wrong, it requires an argument"
def decorator(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
inferior = gdb.selected_inferior()
if not is_inferior_running(inferior) and not is_inferior_coredump(inferior):
print msg
else:
func(*args, **kwargs)
return new_func
return decorator
class CommandBase(gdb.Command):
"""Base class for GDB commands
Provides some basic functionality such as saving the command name and
other arguments. More functionality may be added later, so it's a good idea
    to derive any new commands from this base class or one of its subclasses.
"""
_prefix_cache = []
def __init__(self, name, command_type, **kwargs):
completer_class = kwargs.pop("completer_class", gdb.COMPLETE_NONE)
prefix = kwargs.pop("prefix", False)
existing_prefix = kwargs.pop("existing_prefix", False)
if len(kwargs) > 0:
error_msg = "__init__() got an unexpected keyword argument '{0}'"
raise TypeError(error_msg.format(kwargs.keys()[0]))
super(CommandBase, self).__init__(
name, command_type, completer_class=completer_class, prefix=prefix)
name_chunks = name.rsplit()
command_prefix = ""
if len(name_chunks) > 1:
command_prefix = ''.join(name_chunks[:-1])
if not CommandBase._prefix_cache.count(command_prefix) and not existing_prefix:
# Not an existing prefix (such as a built in command), and not in our
# cache, so this is more than likely a programming error
#
# XXX - The reason we throw a fit here is because GDB seems to silently
# ignore the middle words and turn things like "heap extra command" into
# "heap command", which is not what anyone intended. To make things worse,
# the help for the prefix will list "heap command", but running the command
# "help heap command" will try to list subcommands of "heap extra command"
# if "heap extra command" was created as a prefix command itself. So, we're
            # trying to prevent confusion caused by this weird GDB behavior
msg = ("Programming error: Trying to create a command with an unknown prefix, "
"see note above this assertion")
assert False, msg
if prefix:
# If we got to here, it's a new prefix, so add it to our cache
CommandBase._prefix_cache.append(name)
self.is_prefix = prefix
self.command_name = name
self.command_prefix = command_prefix
self.command_type = command_type
self.completer_class = completer_class
class PrefixCommandBase(CommandBase):
"""Base class for a prefix command
This class's __init__ simply passes all arguments through to CommandBase,
except for prefix, so argument order and kwargs can be found on that class.
Supplies a basic invoke for prefix commands which really just complains
if it is called, since the prefix command shouldn't be invokable by itself.
If this behavior is not desirable, supply your own invoke method.
"""
def __init__(self, *args, **kwargs):
        kwargs["prefix"] = True
super(PrefixCommandBase, self).__init__(*args, **kwargs)
def invoke(self, args, from_tty):
# If invoke gets called for a prefix command then the user
# must have added an unknown sub command at the end
# TBD - How does GDB handle nested prefix commands? Does each
# prefix command get invoked, or only the last one? For now
# we'll just assume the latter until proven otherwise
arg_list = gdb.string_to_argv(args)
if arg_list:
message_template = "Undefined {0} command: \"{1}\". Try \"help {0}\"."
else:
message_template = "\"{0}\" must be followed by the name of a {0} command"
        # You're allowed to pass more arguments than there are slots to format
print message_template.format(self.command_name, *arg_list)
class AnalyzerCommandBase(CommandBase):
"""Base class for a command which uses a heap analyzer
This class's __init__ simply passes all arguments except the first through
to CommandBase, so argument order and kwargs can be found on that class.
One of the benefits of this base class is it checks for a valid analyzer
when invoking the command, and complains if it's not valid. If that isn't
desirable behavior then don't call super(...).invoke(...) in your invoke.
"""
def __init__(self, analyzer, *args, **kwargs):
super(AnalyzerCommandBase, self).__init__(*args, **kwargs)
assert analyzer is not None
self.analyzer = analyzer
def invoke(self, args, from_tty):
if not self.analyzer.is_valid():
print "Heap information is out of date, re-run \"heap analyze\""
return
class HeapCommand(PrefixCommandBase):
"""Commands for analyzing the heap of the current inferior. Available commands
differ by heap implementation, and may become unavailable when switching inferiors."""
def __init__(self):
super(HeapCommand, self).__init__("heap", gdb.COMMAND_DATA)
class HeapAnalyzeCommand(CommandBase):
"""Analyze the heap. This must be called any time the heap changes.
Analyzing the heap may take several seconds for multi-gigabyte heaps"""
def __init__(self, analyzer_state):
super(HeapAnalyzeCommand, self).__init__("heap analyze", gdb.COMMAND_DATA)
assert analyzer_state is not None
self.analyzer_state = analyzer_state
@requires_running_or_core("Can only analyze heap for a running process or core dump")
def invoke(self, args, from_tty):
self.dont_repeat()
if args != "":
print "Command takes no arguments"
return
a_state = self.analyzer_state
current_analyzer = a_state.get_current_analyzer()
if current_analyzer is None:
# Detect the heap
current_analyzer = a_state.detect_heap()
# Activate the commands for it
if current_analyzer is not UnsupportedHeap:
current_analyzer.activate_commands()
# Perform the analyze
current_analyzer.analyze()
elif not current_analyzer.is_valid():
# Reanalyze the heap
current_analyzer.analyze()
else:
response = raw_input(("Heap already analyzed and appears valid. "
"Are you sure you want to reanalyze? [y/n] "))
if response == "Y" or response == "y":
# Reanalyze the heap
current_analyzer.analyze()
class HeapInfoCommand(CommandBase):
"""Info on the heap implementation"""
def __init__(self, analyzer_state):
super(HeapInfoCommand, self).__init__("heap info", gdb.COMMAND_DATA)
assert analyzer_state is not None
self.analyzer_state = analyzer_state
def invoke(self, args, from_tty):
if args != "":
print "Command takes no arguments"
return
current_analyzer = self.analyzer_state.get_current_analyzer()
if current_analyzer is None:
print "Heap not yet analyzed"
elif current_analyzer is UnsupportedHeap:
print "Unknown heap implementation"
else:
print current_analyzer.get_heap_description()
def activate_basic_commands(analyzer_state):
HeapCommand()
HeapInfoCommand(analyzer_state)
HeapAnalyzeCommand(analyzer_state)
| 35.561798
| 99
| 0.658031
|
ea8589626c03fecd4df2b76dc2d824c5e67fdbdd
| 1,673
|
py
|
Python
|
logsandra/controllers/error.py
|
thobbs/logsandra
|
a17abc995dcb0573f3db2f714c1b47d3aff8b20a
|
[
"MIT"
] | 7
|
2015-05-18T13:00:54.000Z
|
2018-08-06T08:27:57.000Z
|
logsandra/controllers/error.py
|
thobbs/logsandra
|
a17abc995dcb0573f3db2f714c1b47d3aff8b20a
|
[
"MIT"
] | null | null | null |
logsandra/controllers/error.py
|
thobbs/logsandra
|
a17abc995dcb0573f3db2f714c1b47d3aff8b20a
|
[
"MIT"
] | 4
|
2015-06-16T11:09:53.000Z
|
2020-04-27T19:25:57.000Z
|
import cgi
from paste.urlparser import PkgResourcesParser
from pylons.middleware import error_document_template
from webhelpers.html.builder import literal
from logsandra.lib.base import BaseController
class ErrorController(BaseController):
"""Generates error documents as and when they are required.
The ErrorDocuments middleware forwards to ErrorController when error
related status codes are returned from the application.
This behaviour can be altered by changing the parameters to the
ErrorDocuments middleware in your config/middleware.py file.
"""
def document(self):
"""Render the error document"""
request = self._py_object.request
resp = request.environ.get('pylons.original_response')
content = literal(resp.body) or cgi.escape(request.GET.get('message', ''))
page = error_document_template % \
dict(prefix=request.environ.get('SCRIPT_NAME', ''),
code=cgi.escape(request.GET.get('code', str(resp.status_int))),
message=content)
return page
def img(self, id):
"""Serve Pylons' stock images"""
return self._serve_file('/'.join(['media/img', id]))
def style(self, id):
"""Serve Pylons' stock stylesheets"""
return self._serve_file('/'.join(['media/style', id]))
def _serve_file(self, path):
"""Call Paste's FileApp (a WSGI application) to serve the file
at the specified path
"""
request = self._py_object.request
request.environ['PATH_INFO'] = '/%s' % path
return PkgResourcesParser('pylons', 'pylons')(request.environ, self.start_response)
| 37.177778
| 91
| 0.674238
|
e10e27f7612e63183206990e6da0c8d17021f668
| 22,181
|
py
|
Python
|
sklearn/gaussian_process/_gpr.py
|
lacrosse91/scikit-learn
|
2325b19a86bd5b6e4b0bfb4eff4ee46a3343cf65
|
[
"BSD-3-Clause"
] | 27
|
2015-01-22T22:30:09.000Z
|
2022-02-15T07:33:06.000Z
|
sklearn/gaussian_process/_gpr.py
|
lacrosse91/scikit-learn
|
2325b19a86bd5b6e4b0bfb4eff4ee46a3343cf65
|
[
"BSD-3-Clause"
] | 5
|
2015-02-24T14:57:35.000Z
|
2018-07-04T19:00:24.000Z
|
sklearn/gaussian_process/_gpr.py
|
lacrosse91/scikit-learn
|
2325b19a86bd5b6e4b0bfb4eff4ee46a3343cf65
|
[
"BSD-3-Clause"
] | 25
|
2015-07-30T13:47:25.000Z
|
2021-08-03T07:48:38.000Z
|
"""Gaussian processes regression."""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Modified by: Pete Green <p.l.green@liverpool.ac.uk>
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve
import scipy.optimize
from ..base import BaseEstimator, RegressorMixin, clone
from ..base import MultiOutputMixin
from .kernels import RBF, ConstantKernel as C
from ..preprocessing._data import _handle_zeros_in_scale
from ..utils import check_random_state
from ..utils.optimize import _check_optimize_result
class GaussianProcessRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes
for Machine Learning (GPML) by Rasmussen and Williams.
In addition to standard scikit-learn estimator API,
GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method `sample_y(X)`, which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method `log_marginal_likelihood(theta)`, which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Read more in the :ref:`User Guide <gaussian_process>`.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel instance, default=None
The kernel specifying the covariance function of the GP. If None is
passed, the kernel ``ConstantKernel(1.0, constant_value_bounds="fixed"
* RBF(1.0, length_scale_bounds="fixed")`` is used as default. Note that
the kernel hyperparameters are optimized during fitting unless the
bounds are marked as "fixed".
alpha : float or ndarray of shape (n_samples,), default=1e-10
Value added to the diagonal of the kernel matrix during fitting.
This can prevent a potential numerical issue during fitting, by
ensuring that the calculated values form a positive definite matrix.
It can also be interpreted as the variance of additional Gaussian
measurement noise on the training observations. Note that this is
different from using a `WhiteKernel`. If an array is passed, it must
have the same number of entries as the data used for fitting and is
used as datapoint-dependent noise level. Allowing to specify the
noise level directly as a parameter is mainly for convenience and
for consistency with Ridge.
optimizer : "fmin_l_bfgs_b" or callable, default="fmin_l_bfgs_b"
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func': the objective function to be minimized, which
# takes the hyperparameters theta as a parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, default=0
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y : bool, default=False
Whether the target values y are normalized, the mean and variance of
the target values are set equal to 0 and 1 respectively. This is
recommended for cases where zero-mean, unit-variance priors are used.
Note that, in this implementation, the normalisation is reversed
before the GP predictions are reported.
.. versionchanged:: 0.23
copy_X_train : bool, default=True
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, default=None
Determines random number generation used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
X_train_ : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data (also
required for prediction).
y_train_ : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values in training data (also required for prediction)
kernel_ : kernel instance
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like of shape (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_ : array-like of shape (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = DotProduct() + WhiteKernel()
>>> gpr = GaussianProcessRegressor(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.3680...
>>> gpr.predict(X[:2,:], return_std=True)
(array([653.0..., 592.1...]), array([316.6..., 316.6...]))
"""
def __init__(
self,
kernel=None,
*,
alpha=1e-10,
optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0,
normalize_y=False,
copy_X_train=True,
random_state=None,
):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process regression model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
1.0, length_scale_bounds="fixed"
)
else:
self.kernel_ = clone(self.kernel)
self._rng = check_random_state(self.random_state)
if self.kernel_.requires_vector_input:
X, y = self._validate_data(
X, y, multi_output=True, y_numeric=True, ensure_2d=True, dtype="numeric"
)
else:
X, y = self._validate_data(
X, y, multi_output=True, y_numeric=True, ensure_2d=False, dtype=None
)
# Normalize target value
if self.normalize_y:
self._y_train_mean = np.mean(y, axis=0)
self._y_train_std = _handle_zeros_in_scale(np.std(y, axis=0), copy=False)
# Remove mean and make unit variance
y = (y - self._y_train_mean) / self._y_train_std
else:
self._y_train_mean = np.zeros(1)
self._y_train_std = 1
if np.iterable(self.alpha) and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError(
"alpha must be a scalar or an array "
"with same number of entries as y. (%d != %d)"
% (self.alpha.shape[0], y.shape[0])
)
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True, clone_kernel=False
)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta, clone_kernel=False)
# First optimize starting from theta specified in kernel
optima = [
(
self._constrained_optimization(
obj_func, self.kernel_.theta, self.kernel_.bounds
)
)
]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite."
)
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = self._rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial, bounds)
)
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.kernel_._check_bounds_params()
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
self.kernel_.theta, clone_kernel=False
)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
self.L_ = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError as exc:
exc.args = (
"The kernel, %s, is not returning a "
"positive definite matrix. Try gradually "
"increasing the 'alpha' parameter of your "
"GaussianProcessRegressor estimator." % self.kernel_,
) + exc.args
raise
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
In addition to the mean of the predictive distribution, optionally also
returns its standard deviation (`return_std=True`) or covariance
(`return_cov=True`). Note that at most one of the two can be requested.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated.
return_std : bool, default=False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default=False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean.
Returns
-------
y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets)
            Mean of predictive distribution at query points.
y_std : ndarray of shape (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when `return_std` is True.
y_cov : ndarray of shape (n_samples, n_samples), optional
            Covariance of joint predictive distribution at query points.
Only returned when `return_cov` is True.
"""
if return_std and return_cov:
raise RuntimeError(
"At most one of return_std or return_cov can be requested."
)
if self.kernel is None or self.kernel.requires_vector_input:
X = self._validate_data(X, ensure_2d=True, dtype="numeric", reset=False)
else:
X = self._validate_data(X, ensure_2d=False, dtype=None, reset=False)
if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior
if self.kernel is None:
kernel = C(1.0, constant_value_bounds="fixed") * RBF(
1.0, length_scale_bounds="fixed"
)
else:
kernel = self.kernel
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = kernel(X)
return y_mean, y_cov
elif return_std:
y_var = kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
# undo normalisation
y_mean = self._y_train_std * y_mean + self._y_train_mean
if return_cov:
# Solve K @ V = K_trans.T
V = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(V) # Line 6
# undo normalisation
y_cov = y_cov * self._y_train_std ** 2
return y_mean, y_cov
elif return_std:
# Solve K @ V = K_trans.T
V = cho_solve((self.L_, True), K_trans.T) # Line 5
# Compute variance of predictive distribution
# Use einsum to avoid explicitly forming the large matrix
# K_trans @ V just to extract its diagonal afterward.
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ij,ji->i", K_trans, V)
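                # i.e. Var[f*] = diag(K(X*, X*)) - diag(K(X*, X) K^{-1} K(X, X*)),
                # computed row-wise without forming the full posterior covariance.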
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn(
"Predicted variances smaller than 0. "
"Setting those variances to 0."
)
y_var[y_var_negative] = 0.0
# undo normalisation
y_var = y_var * self._y_train_std ** 2
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Query points where the GP is evaluated.
n_samples : int, default=1
Number of samples drawn from the Gaussian process per query point
random_state : int, RandomState instance or None, default=0
Determines random number generation to randomly draw samples.
Pass an int for reproducible results across multiple function
calls.
See :term:`Glossary <random_state>`.
Returns
-------
y_samples : ndarray of shape (n_samples_X, n_samples), or \
(n_samples_X, n_targets, n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = [
rng.multivariate_normal(y_mean[:, i], y_cov, n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])
]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like of shape (n_kernel_params,) default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError("Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
if clone_kernel:
kernel = self.kernel_.clone_with_theta(theta)
else:
kernel = self.kernel_
kernel.theta = theta
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = 0.5 * np.einsum(
"ijl,jik->kl", tmp, K_gradient
)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
opt_res = scipy.optimize.minimize(
obj_func, initial_theta, method="L-BFGS-B", jac=True, bounds=bounds
)
_check_optimize_result("lbfgs", opt_res)
theta_opt, func_min = opt_res.x, opt_res.fun
elif callable(self.optimizer):
theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
def _more_tags(self):
return {"requires_fit": False}
| 41.228625
| 88
| 0.612281
|
8dc438c9ee12b80ab3e54f0dae398b63d390490a
| 15,559
|
py
|
Python
|
tests/stores/test_mongolike.py
|
munrojm/maggma
|
baff06b1682d82f7275d6fba9495c50f5f28ebb1
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
tests/stores/test_mongolike.py
|
munrojm/maggma
|
baff06b1682d82f7275d6fba9495c50f5f28ebb1
|
[
"BSD-3-Clause-LBNL"
] | 69
|
2020-09-10T22:09:46.000Z
|
2021-05-24T05:40:52.000Z
|
tests/stores/test_mongolike.py
|
munrojm/maggma
|
baff06b1682d82f7275d6fba9495c50f5f28ebb1
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
import os
import shutil
from datetime import datetime
from unittest import mock
import mongomock.collection
from monty.tempfile import ScratchDir
import pymongo.collection
import pytest
from pymongo.errors import ConfigurationError, DocumentTooLarge, OperationFailure
import maggma.stores
from maggma.core import StoreError
from maggma.stores import JSONStore, MemoryStore, MongoStore, MongoURIStore
from maggma.stores.mongolike import MontyStore
from maggma.validators import JSONSchemaValidator
@pytest.fixture
def mongostore():
store = MongoStore(
database="maggma_test",
collection_name="test",
)
store.connect()
yield store
store._collection.drop()
@pytest.fixture
def montystore(tmp_dir):
store = MontyStore("maggma_test")
store.connect()
return store
@pytest.fixture
def memorystore():
store = MemoryStore()
store.connect()
return store
@pytest.fixture
def jsonstore(test_dir):
files = []
for f in ["a.json", "b.json"]:
files.append(test_dir / "test_set" / f)
return JSONStore(files)
def test_mongostore_connect():
mongostore = MongoStore("maggma_test", "test")
assert mongostore._collection is None
mongostore.connect()
assert isinstance(mongostore._collection, pymongo.collection.Collection)
def test_mongostore_query(mongostore):
mongostore._collection.insert_one({"a": 1, "b": 2, "c": 3})
assert mongostore.query_one(properties=["a"])["a"] == 1
assert mongostore.query_one(properties=["a"])["a"] == 1
assert mongostore.query_one(properties=["b"])["b"] == 2
assert mongostore.query_one(properties=["c"])["c"] == 3
def test_mongostore_count(mongostore):
mongostore._collection.insert_one({"a": 1, "b": 2, "c": 3})
assert mongostore.count() == 1
mongostore._collection.insert_one({"aa": 1, "b": 2, "c": 3})
assert mongostore.count() == 2
assert mongostore.count({"a": 1}) == 1
def test_mongostore_distinct(mongostore):
mongostore._collection.insert_one({"a": 1, "b": 2, "c": 3})
mongostore._collection.insert_one({"a": 4, "d": 5, "e": 6, "g": {"h": 1}})
assert set(mongostore.distinct("a")) == {1, 4}
# Test list distinct functionality
mongostore._collection.insert_one({"a": 4, "d": 6, "e": 7})
mongostore._collection.insert_one({"a": 4, "d": 6, "g": {"h": 2}})
# Test distinct subdocument functionality
ghs = mongostore.distinct("g.h")
assert set(ghs) == {1, 2}
# Test when key doesn't exist
assert mongostore.distinct("blue") == []
# Test when null is a value
mongostore._collection.insert_one({"i": None})
assert mongostore.distinct("i") == [None]
# Test to make sure DocumentTooLarge errors get dealt with properly using built in distinct
mongostore._collection.insert_many([{"key": [f"mp-{i}"]} for i in range(1000000)])
vals = mongostore.distinct("key")
# Test to make sure distinct on array field is unraveled when using manual distinct
assert len(vals) == len(list(range(1000000)))
assert all([isinstance(v, str) for v in vals])
# Test to make sure manual distinct uses the criteria query
mongostore._collection.insert_many(
[{"key": f"mp-{i}", "a": 2} for i in range(1000001, 2000001)]
)
vals = mongostore.distinct("key", {"a": 2})
assert len(vals) == len(list(range(1000001, 2000001)))
def test_mongostore_update(mongostore):
mongostore.update({"e": 6, "d": 4}, key="e")
assert (
mongostore.query_one(criteria={"d": {"$exists": 1}}, properties=["d"])["d"] == 4
)
mongostore.update([{"e": 7, "d": 8, "f": 9}], key=["d", "f"])
assert mongostore.query_one(criteria={"d": 8, "f": 9}, properties=["e"])["e"] == 7
mongostore.update([{"e": 11, "d": 8, "f": 9}], key=["d", "f"])
assert mongostore.query_one(criteria={"d": 8, "f": 9}, properties=["e"])["e"] == 11
test_schema = {
"type": "object",
"properties": {"e": {"type": "integer"}},
"required": ["e"],
}
mongostore.validator = JSONSchemaValidator(schema=test_schema)
mongostore.update({"e": 100, "d": 3}, key="e")
# Continue to update doc when validator is not set to strict mode
mongostore.update({"e": "abc", "d": 3}, key="e")
# ensure safe_update works to not throw DocumentTooLarge errors
large_doc = {f"mp-{i}": f"mp-{i}" for i in range(1000000)}
large_doc["e"] = 999
with pytest.raises((OperationFailure, DocumentTooLarge)):
mongostore.update([large_doc, {"e": 1001}], key="e")
mongostore.safe_update = True
mongostore.update([large_doc, {"e": 1001}], key="e")
assert mongostore.query_one({"e": 1001}) is not None
def test_mongostore_groupby(mongostore):
mongostore.update(
[
{"e": 7, "d": 9, "f": 9},
{"e": 7, "d": 9, "f": 10},
{"e": 8, "d": 9, "f": 11},
{"e": 9, "d": 10, "f": 12},
],
key="f",
)
data = list(mongostore.groupby("d"))
assert len(data) == 2
grouped_by_9 = [g[1] for g in data if g[0]["d"] == 9][0]
assert len(grouped_by_9) == 3
grouped_by_10 = [g[1] for g in data if g[0]["d"] == 10][0]
assert len(grouped_by_10) == 1
data = list(mongostore.groupby(["e", "d"]))
assert len(data) == 3
def test_mongostore_remove_docs(mongostore):
mongostore._collection.insert_one({"a": 1, "b": 2, "c": 3})
mongostore._collection.insert_one({"a": 4, "d": 5, "e": 6, "g": {"h": 1}})
mongostore.remove_docs({"a": 1})
assert len(list(mongostore.query({"a": 4}))) == 1
assert len(list(mongostore.query({"a": 1}))) == 0
def test_mongostore_from_db_file(mongostore, db_json):
ms = MongoStore.from_db_file(db_json)
ms.connect()
assert ms._collection.full_name == "maggma_tests.tmp"
def test_mongostore_from_launchpad_file(lp_file):
ms = MongoStore.from_launchpad_file(lp_file, collection_name="tmp")
ms.connect()
assert ms._collection.full_name == "maggma_tests.tmp"
def test_mongostore_from_collection(mongostore, db_json):
ms = MongoStore.from_db_file(db_json)
ms.connect()
other_ms = MongoStore.from_collection(ms._collection)
assert ms._collection.full_name == other_ms._collection.full_name
assert ms.database == other_ms.database
def test_mongostore_name(mongostore):
assert mongostore.name == "mongo://localhost/maggma_test/test"
def test_ensure_index(mongostore):
assert mongostore.ensure_index("test_key")
# TODO: How to check for exception?
def test_mongostore_last_updated(mongostore):
assert mongostore.last_updated == datetime.min
start_time = datetime.utcnow()
mongostore._collection.insert_one({mongostore.key: 1, "a": 1})
with pytest.raises(StoreError) as cm:
mongostore.last_updated
assert cm.match(mongostore.last_updated_field)
mongostore.update(
[{mongostore.key: 1, "a": 1, mongostore.last_updated_field: datetime.utcnow()}]
)
assert mongostore.last_updated > start_time
def test_mongostore_newer_in(mongostore):
target = MongoStore("maggma_test", "test_target")
target.connect()
# make sure docs are newer in mongostore then target and check updated_keys
target.update(
[
{mongostore.key: i, mongostore.last_updated_field: datetime.utcnow()}
for i in range(10)
]
)
# Update docs in source
mongostore.update(
[
{mongostore.key: i, mongostore.last_updated_field: datetime.utcnow()}
for i in range(10)
]
)
assert len(target.newer_in(mongostore)) == 10
assert len(target.newer_in(mongostore, exhaustive=True)) == 10
assert len(mongostore.newer_in(target)) == 0
target._collection.drop()
# Memory store tests
def test_memory_store_connect():
memorystore = MemoryStore()
assert memorystore._collection is None
memorystore.connect()
assert isinstance(memorystore._collection, mongomock.collection.Collection)
def test_groupby(memorystore):
memorystore.update(
[
{"e": 7, "d": 9, "f": 9},
{"e": 7, "d": 9, "f": 10},
{"e": 8, "d": 9, "f": 11},
{"e": 9, "d": 10, "f": 12},
],
key="f",
)
data = list(memorystore.groupby("d"))
assert len(data) == 2
grouped_by_9 = [g[1] for g in data if g[0]["d"] == 9][0]
assert len(grouped_by_9) == 3
grouped_by_10 = [g[1] for g in data if g[0]["d"] == 10][0]
assert len(grouped_by_10) == 1
data = list(memorystore.groupby(["e", "d"]))
assert len(data) == 3
memorystore.update(
[
{"e": {"d": 9}, "f": 9},
{"e": {"d": 9}, "f": 10},
{"e": {"d": 9}, "f": 11},
{"e": {"d": 10}, "f": 12},
],
key="f",
)
data = list(memorystore.groupby("e.d"))
assert len(data) == 2
# Monty store tests
def test_monty_store_connect(tmp_dir):
montystore = MontyStore(collection_name="my_collection")
assert montystore._collection is None
montystore.connect()
assert montystore._collection is not None
def test_monty_store_groupby(montystore):
montystore.update(
[
{"e": 7, "d": 9, "f": 9},
{"e": 7, "d": 9, "f": 10},
{"e": 8, "d": 9, "f": 11},
{"e": 9, "d": 10, "f": 12},
],
key="f",
)
data = list(montystore.groupby("d"))
assert len(data) == 2
grouped_by_9 = [g[1] for g in data if g[0]["d"] == 9][0]
assert len(grouped_by_9) == 3
grouped_by_10 = [g[1] for g in data if g[0]["d"] == 10][0]
assert len(grouped_by_10) == 1
data = list(montystore.groupby(["e", "d"]))
assert len(data) == 3
montystore.update(
[
{"e": {"d": 9}, "f": 9},
{"e": {"d": 9}, "f": 10},
{"e": {"d": 9}, "f": 11},
{"e": {"d": 10}, "f": 12},
],
key="f",
)
data = list(montystore.groupby("e.d"))
assert len(data) == 2
def test_montystore_query(montystore):
montystore._collection.insert_one({"a": 1, "b": 2, "c": 3})
assert montystore.query_one(properties=["a"])["a"] == 1
assert montystore.query_one(properties=["a"])["a"] == 1
assert montystore.query_one(properties=["b"])["b"] == 2
assert montystore.query_one(properties=["c"])["c"] == 3
def test_montystore_count(montystore):
montystore._collection.insert_one({"a": 1, "b": 2, "c": 3})
assert montystore.count() == 1
montystore._collection.insert_one({"aa": 1, "b": 2, "c": 3})
assert montystore.count() == 2
assert montystore.count({"a": 1}) == 1
def test_montystore_distinct(montystore):
montystore._collection.insert_one({"a": 1, "b": 2, "c": 3})
montystore._collection.insert_one({"a": 4, "d": 5, "e": 6, "g": {"h": 1}})
assert set(montystore.distinct("a")) == {1, 4}
# Test list distinct functionality
montystore._collection.insert_one({"a": 4, "d": 6, "e": 7})
montystore._collection.insert_one({"a": 4, "d": 6, "g": {"h": 2}})
# Test distinct subdocument functionality
ghs = montystore.distinct("g.h")
assert set(ghs) == {1, 2}
# Test when key doesn't exist
assert montystore.distinct("blue") == []
# Test when null is a value
montystore._collection.insert_one({"i": None})
assert montystore.distinct("i") == [None]
def test_montystore_update(montystore):
montystore.update({"e": 6, "d": 4}, key="e")
assert (
montystore.query_one(criteria={"d": {"$exists": 1}}, properties=["d"])["d"] == 4
)
montystore.update([{"e": 7, "d": 8, "f": 9}], key=["d", "f"])
assert montystore.query_one(criteria={"d": 8, "f": 9}, properties=["e"])["e"] == 7
montystore.update([{"e": 11, "d": 8, "f": 9}], key=["d", "f"])
assert montystore.query_one(criteria={"d": 8, "f": 9}, properties=["e"])["e"] == 11
test_schema = {
"type": "object",
"properties": {"e": {"type": "integer"}},
"required": ["e"],
}
montystore.validator = JSONSchemaValidator(schema=test_schema)
montystore.update({"e": 100, "d": 3}, key="e")
# Continue to update doc when validator is not set to strict mode
montystore.update({"e": "abc", "d": 3}, key="e")
def test_montystore_remove_docs(montystore):
montystore._collection.insert_one({"a": 1, "b": 2, "c": 3})
montystore._collection.insert_one({"a": 4, "d": 5, "e": 6, "g": {"h": 1}})
montystore.remove_docs({"a": 1})
assert len(list(montystore.query({"a": 4}))) == 1
assert len(list(montystore.query({"a": 1}))) == 0
def test_json_store_load(jsonstore, test_dir):
jsonstore.connect()
assert len(list(jsonstore.query())) == 20
jsonstore = JSONStore(test_dir / "test_set" / "c.json.gz")
jsonstore.connect()
assert len(list(jsonstore.query())) == 20
def test_json_store_writeable(test_dir):
with ScratchDir("."):
shutil.copy(test_dir / "test_set" / "d.json", ".")
jsonstore = JSONStore("d.json", file_writable=True)
jsonstore.connect()
assert jsonstore.count() == 2
jsonstore.update({"new": "hello", "task_id": 2})
assert jsonstore.count() == 3
jsonstore.close()
jsonstore = JSONStore("d.json", file_writable=True)
jsonstore.connect()
assert jsonstore.count() == 3
jsonstore.remove_docs({"a": 5})
assert jsonstore.count() == 2
jsonstore.close()
jsonstore = JSONStore("d.json", file_writable=True)
jsonstore.connect()
assert jsonstore.count() == 2
jsonstore.close()
with mock.patch(
"maggma.stores.JSONStore.update_json_file"
) as update_json_file_mock:
jsonstore = JSONStore("d.json", file_writable=False)
jsonstore.connect()
jsonstore.update({"new": "hello", "task_id": 5})
assert jsonstore.count() == 3
jsonstore.close()
update_json_file_mock.assert_not_called()
with mock.patch(
"maggma.stores.JSONStore.update_json_file"
) as update_json_file_mock:
jsonstore = JSONStore("d.json", file_writable=False)
jsonstore.connect()
jsonstore.remove_docs({"task_id": 5})
assert jsonstore.count() == 2
jsonstore.close()
update_json_file_mock.assert_not_called()
def test_eq(mongostore, memorystore, jsonstore):
assert mongostore == mongostore
assert memorystore == memorystore
assert jsonstore == jsonstore
assert mongostore != memorystore
assert mongostore != jsonstore
assert memorystore != jsonstore
@pytest.mark.skipif(
"mongodb+srv" not in os.environ.get("MONGODB_SRV_URI", ""),
reason="requires special mongodb+srv URI",
)
def test_mongo_uri():
uri = os.environ["MONGODB_SRV_URI"]
store = MongoURIStore(uri, database="mp_core", collection_name="xas")
store.connect()
    is_name = store.name == uri
    # Compare via a boolean first to try to keep the secret (the full URI) out of the assertion output
    assert is_name
def test_mongo_uri_dbname_parse():
# test parsing dbname from uri
uri_with_db = "mongodb://uuu:xxxx@host:27017/fake_db"
store = MongoURIStore(uri_with_db, "test")
assert store.database == "fake_db"
uri_with_db = "mongodb://uuu:xxxx@host:27017/fake_db"
store = MongoURIStore(uri_with_db, "test", database="fake_db2")
assert store.database == "fake_db2"
uri_with_db = "mongodb://uuu:xxxx@host:27017"
with pytest.raises(ConfigurationError):
MongoURIStore(uri_with_db, "test")
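# --- Illustrative sketch (not part of the original test module) --------------
# Distilled from the tests above: attach a JSON-schema validator and enable
# safe_update before writing documents.  Store parameters are placeholders.
def _validated_store_example():
    store = MongoStore(database="maggma_test", collection_name="test")
    store.validator = JSONSchemaValidator(
        schema={
            "type": "object",
            "properties": {"e": {"type": "integer"}},
            "required": ["e"],
        }
    )
    store.safe_update = True  # tolerate oversized documents instead of raising (see test above)
    store.connect()
    store.update({"e": 1, "task_id": "mp-1"}, key="e")
    return store.query_one({"e": 1})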
| 32.347193
| 95
| 0.616299
|
346372c31f3fa9f756c1c33d566eda7a0f6a083d
| 1,038
|
py
|
Python
|
MC simulation/mcdose/test/test_weights.py
|
qihuilyu/P2T
|
6b8a24a632354d70c8ba44df717291573a5e0bd2
|
[
"MIT"
] | null | null | null |
MC simulation/mcdose/test/test_weights.py
|
qihuilyu/P2T
|
6b8a24a632354d70c8ba44df717291573a5e0bd2
|
[
"MIT"
] | null | null | null |
MC simulation/mcdose/test/test_weights.py
|
qihuilyu/P2T
|
6b8a24a632354d70c8ba44df717291573a5e0bd2
|
[
"MIT"
] | null | null | null |
import unittest
import os
from os.path import join as pjoin
from collections import namedtuple
import numpy as np
from setup_tests import test_data
from mcdose import weights
DataPair = namedtuple('DataPair', ('inputs', 'labels'))
class TestWeights(unittest.TestCase):
@classmethod
def setUpClass(cls):
        inputs = np.arange(1, 11)[:, None, None, None, None] * np.ones((3, 5, 5, 1), dtype=np.float32)
cls.data = DataPair(
inputs=inputs,
labels=inputs
)
def test_sample_lin_norm_sum(self):
w = weights.sample_lin_norm_sum()(*self.data)
gt = np.arange(1,11, dtype=float)/10.0
self.assertTrue(np.allclose(w, gt), 'Arrays do not agree')
def test_sample_exp_norm_sum(self):
decay_rate = 5.0
w = weights.sample_exp_norm_sum(decay_rate)(*self.data)
gt = np.exp(decay_rate * (np.arange(1,11, dtype=float)/10.0 - 1))
self.assertTrue(np.allclose(w, gt), 'Arrays do not agree')
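# --- Illustrative sketch (an assumption, not the project's implementation) ---
# Weight factories consistent with the expectations checked above: each
# sample's label values are summed, normalised by the largest per-sample sum,
# and the "exp" variant additionally applies exp(decay_rate * (w - 1)).
def sample_lin_norm_sum_sketch():
    def weigh(inputs, labels):
        per_sample_sum = labels.reshape(labels.shape[0], -1).sum(axis=1)
        return per_sample_sum / per_sample_sum.max()
    return weigh
def sample_exp_norm_sum_sketch(decay_rate):
    lin = sample_lin_norm_sum_sketch()
    def weigh(inputs, labels):
        return np.exp(decay_rate * (lin(inputs, labels) - 1.0))
    return weigh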
if __name__ == '__main__':
unittest.main()
| 28.833333
| 101
| 0.654143
|
e387da4e30e76372a170e4a41496314b4ffd808e
| 5,020
|
py
|
Python
|
barbican/api/controllers/quotas.py
|
dmend/barbican
|
5ff7b4ca1474225acabc36acedcf70a41946e6d0
|
[
"Apache-2.0"
] | 177
|
2015-01-02T09:35:53.000Z
|
2022-02-26T01:43:55.000Z
|
barbican/api/controllers/quotas.py
|
kkutysllb/barbican
|
7b14d983e0dce6dcffe9781b05c52335b8203fc7
|
[
"Apache-2.0"
] | 3
|
2015-06-23T19:07:31.000Z
|
2017-08-19T04:38:11.000Z
|
barbican/api/controllers/quotas.py
|
kkutysllb/barbican
|
7b14d983e0dce6dcffe9781b05c52335b8203fc7
|
[
"Apache-2.0"
] | 87
|
2015-01-13T17:33:40.000Z
|
2021-11-09T05:30:36.000Z
|
# Copyright (c) 2015 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from barbican import api
from barbican.api import controllers
from barbican.common import exception
from barbican.common import quota
from barbican.common import resources as res
from barbican.common import utils
from barbican.common import validators
from barbican import i18n as u
LOG = utils.getLogger(__name__)
def _project_quotas_not_found():
"""Throw exception indicating project quotas not found."""
pecan.abort(404, u._('Project quotas not found.'))
class QuotasController(controllers.ACLMixin):
"""Handles quota retrieval requests."""
def __init__(self):
LOG.debug('=== Creating QuotasController ===')
self.quota_driver = quota.QuotaDriver()
@pecan.expose(generic=True)
def index(self, **kwargs):
pecan.abort(405) # HTTP 405 Method Not Allowed as default
@index.when(method='GET', template='json')
@controllers.handle_exceptions(u._('Quotas'))
@controllers.enforce_rbac('quotas:get')
def on_get(self, external_project_id, **kwargs):
LOG.debug('=== QuotasController GET ===')
# make sure project exists
res.get_or_create_project(external_project_id)
resp = self.quota_driver.get_quotas(external_project_id)
return resp
class ProjectQuotasController(controllers.ACLMixin):
"""Handles project quota requests."""
def __init__(self, project_id):
LOG.debug('=== Creating ProjectQuotasController ===')
self.passed_project_id = project_id
self.validator = validators.ProjectQuotaValidator()
self.quota_driver = quota.QuotaDriver()
@pecan.expose(generic=True)
def index(self, **kwargs):
pecan.abort(405) # HTTP 405 Method Not Allowed as default
@index.when(method='GET', template='json')
@controllers.handle_exceptions(u._('Project Quotas'))
@controllers.enforce_rbac('project_quotas:get')
def on_get(self, external_project_id, **kwargs):
LOG.debug('=== ProjectQuotasController GET ===')
resp = self.quota_driver.get_project_quotas(self.passed_project_id)
if resp:
return resp
else:
_project_quotas_not_found()
@index.when(method='PUT', template='json')
@controllers.handle_exceptions(u._('Project Quotas'))
@controllers.enforce_rbac('project_quotas:put')
@controllers.enforce_content_types(['application/json'])
def on_put(self, external_project_id, **kwargs):
LOG.debug('=== ProjectQuotasController PUT ===')
if not pecan.request.body:
raise exception.NoDataToProcess()
api.load_body(pecan.request,
validator=self.validator)
self.quota_driver.set_project_quotas(self.passed_project_id,
kwargs['project_quotas'])
LOG.info('Put Project Quotas')
pecan.response.status = 204
@index.when(method='DELETE', template='json')
@utils.allow_all_content_types
@controllers.handle_exceptions(u._('Project Quotas'))
@controllers.enforce_rbac('project_quotas:delete')
def on_delete(self, external_project_id, **kwargs):
LOG.debug('=== ProjectQuotasController DELETE ===')
try:
self.quota_driver.delete_project_quotas(self.passed_project_id)
except exception.NotFound:
LOG.info('Delete Project Quotas - Project not found')
_project_quotas_not_found()
else:
LOG.info('Delete Project Quotas')
pecan.response.status = 204
class ProjectsQuotasController(controllers.ACLMixin):
"""Handles projects quota retrieval requests."""
def __init__(self):
LOG.debug('=== Creating ProjectsQuotaController ===')
self.quota_driver = quota.QuotaDriver()
@pecan.expose()
def _lookup(self, project_id, *remainder):
return ProjectQuotasController(project_id), remainder
@pecan.expose(generic=True)
def index(self, **kwargs):
pecan.abort(405) # HTTP 405 Method Not Allowed as default
@index.when(method='GET', template='json')
@controllers.handle_exceptions(u._('Project Quotas'))
@controllers.enforce_rbac('project_quotas:get')
def on_get(self, external_project_id, **kwargs):
resp = self.quota_driver.get_project_quotas_list(
offset_arg=kwargs.get('offset', 0),
limit_arg=kwargs.get('limit', None)
)
return resp
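# Illustrative only (not part of the original controller module): the shape of
# a PUT body that ProjectQuotasController.on_put above reads via
# kwargs['project_quotas'].  The nested quota field names are assumptions; the
# ProjectQuotaValidator defines the authoritative schema.
EXAMPLE_PROJECT_QUOTAS_BODY = {
    "project_quotas": {
        "secrets": 500,      # assumed quota field
        "orders": 100,       # assumed quota field
        "containers": 100,   # assumed quota field
    }
}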
| 36.911765
| 76
| 0.686454
|
3ca3011b66b53e0f0b1cf41b924a0020eb37f8a8
| 2,626
|
py
|
Python
|
tests/utils/bq_test.py
|
scottaubrey/data-science-dags
|
f45c4e1bb8e538da57161c20953edca2e66ffd4f
|
[
"MIT"
] | 1
|
2021-09-15T04:47:25.000Z
|
2021-09-15T04:47:25.000Z
|
tests/utils/bq_test.py
|
scottaubrey/data-science-dags
|
f45c4e1bb8e538da57161c20953edca2e66ffd4f
|
[
"MIT"
] | 39
|
2021-06-21T05:52:43.000Z
|
2022-03-29T18:39:06.000Z
|
tests/utils/bq_test.py
|
scottaubrey/data-science-dags
|
f45c4e1bb8e538da57161c20953edca2e66ffd4f
|
[
"MIT"
] | 1
|
2021-12-23T15:36:54.000Z
|
2021-12-23T15:36:54.000Z
|
import gzip
import json
from pathlib import Path
import pandas as pd
import numpy as np
from data_science_pipeline.utils.bq import (
df_as_jsonl_file_without_null
)
class TestDfAsJsonlFileWithoutNull:
def test_should_remove_null_value(self, temp_dir: Path):
jsonl_file = temp_dir / 'data.jsonl'
df = pd.DataFrame([{'key1': 'value1', 'key2': None}])
with df_as_jsonl_file_without_null(df, gzip_enabled=False) as jsonl_file:
result = [
json.loads(line)
for line in Path(jsonl_file).read_text().splitlines()
]
assert result == [{'key1': 'value1'}]
def test_should_remove_np_nan_value(self, temp_dir: Path):
jsonl_file = temp_dir / 'data.jsonl'
df = pd.DataFrame([{'key1': 'value1', 'key2': np.nan}])
with df_as_jsonl_file_without_null(df, gzip_enabled=False) as jsonl_file:
result = [
json.loads(line)
for line in Path(jsonl_file).read_text().splitlines()
]
assert result == [{'key1': 'value1'}]
def test_should_remove_null_value_from_nested_field(self, temp_dir: Path):
jsonl_file = temp_dir / 'data.jsonl'
df = pd.DataFrame([{'parent': {'key1': 'value1', 'key2': None}}])
with df_as_jsonl_file_without_null(df, gzip_enabled=False) as jsonl_file:
result = [
json.loads(line)
for line in Path(jsonl_file).read_text().splitlines()
]
assert result == [{'parent': {'key1': 'value1'}}]
def test_should_not_fail_with_list_values_field(self, temp_dir: Path):
jsonl_file = temp_dir / 'data.jsonl'
df = pd.DataFrame([{'key1': ['value1', 'value2'], 'key2': None}])
with df_as_jsonl_file_without_null(df, gzip_enabled=False) as jsonl_file:
result = [
json.loads(line)
for line in Path(jsonl_file).read_text().splitlines()
]
assert result == [{'key1': ['value1', 'value2']}]
def test_should_use_gzip_compression_by_default(self, temp_dir: Path):
jsonl_file = temp_dir / 'data.jsonl'
df = pd.DataFrame([{'key1': 'value1', 'key2': None}])
with df_as_jsonl_file_without_null(df) as jsonl_file:
assert jsonl_file.endswith('.gz')
result = [
json.loads(line)
for line in (
gzip.decompress(Path(jsonl_file).read_bytes())
.decode()
.splitlines()
)
]
assert result == [{'key1': 'value1'}]
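# --- Illustrative sketch (an assumption, not the real data_science_pipeline
# implementation): a context manager with the behaviour these tests rely on.
# It dumps a DataFrame to a JSONL file, drops None/NaN fields (recursively for
# dict-valued columns) and gzip-compresses by default.
import contextlib
import tempfile
def _drop_null_fields(value):
    if isinstance(value, dict):
        return {
            key: _drop_null_fields(nested)
            for key, nested in value.items()
            if nested is not None
            and not (isinstance(nested, float) and np.isnan(nested))
        }
    return value
@contextlib.contextmanager
def df_as_jsonl_file_without_null_sketch(df, gzip_enabled=True):
    suffix = '.jsonl.gz' if gzip_enabled else '.jsonl'
    with tempfile.TemporaryDirectory() as work_dir:
        path = str(Path(work_dir) / ('data' + suffix))
        lines = [
            json.dumps(_drop_null_fields(record))
            for record in df.to_dict(orient='records')
        ]
        payload = ('\n'.join(lines) + '\n').encode()
        Path(path).write_bytes(gzip.compress(payload) if gzip_enabled else payload)
        yield path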
| 38.617647
| 81
| 0.586443
|
b60de7d7a66462a79602e147c76c1b90c78a0c69
| 24,891
|
py
|
Python
|
jcvi/formats/vcf.py
|
fossabot/jcvi
|
86948affd63e94c8327cf117c47d36940b508b68
|
[
"BSD-2-Clause"
] | 1
|
2020-10-04T13:21:24.000Z
|
2020-10-04T13:21:24.000Z
|
jcvi/formats/vcf.py
|
Wangjien/jcvi
|
6732285f62dcbd7f3878e5017c3350124530c796
|
[
"BSD-2-Clause"
] | null | null | null |
jcvi/formats/vcf.py
|
Wangjien/jcvi
|
6732285f62dcbd7f3878e5017c3350124530c796
|
[
"BSD-2-Clause"
] | 1
|
2020-11-16T19:25:30.000Z
|
2020-11-16T19:25:30.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Variant call format.
"""
from __future__ import print_function
import os.path as op
import sys
import logging
from collections import defaultdict
from itertools import groupby
from pyfaidx import Fasta
from pyliftover import LiftOver
from jcvi.formats.base import must_open
from jcvi.formats.sizes import Sizes
from jcvi.utils.cbook import percentage
from jcvi.apps.base import OptionParser, ActionDispatcher, need_update, sh
class VcfLine:
def __init__(self, row):
args = row.strip().split("\t")
self.seqid = args[0]
self.pos = int(args[1])
self.rsid = args[2]
self.ref = args[3]
self.alt = args[4]
self.qual = args[5]
self.filter = args[6]
self.info = args[7]
self.format = args[8]
self.genotype = args[9]
def __str__(self):
return "\t".join(str(x) for x in (
self.seqid, self.pos, self.rsid, self.ref,
self.alt, self.qual, self.filter, self.info,
self.format, self.genotype
))
class UniqueLiftover(object):
def __init__(self, chainfile):
"""
This object will perform unique single positional liftovers - it will only lift over chromosome positions that
map unique to the new genome and if the strand hasn't changed.
Note: You should run a VCF Normalization sweep on all lifted ofer CPRAs to check for variants that need to be
re-normalized, and to remove variants where the REF now doesn't match after a liftover.
The combination of these steps will ensure high quality liftovers. However, it should be noted that this won't
prevent the situation where multiple positions in the old genome pile up uniquely in the new genome, so one
needs to check for this.
It's organised as an object rather than a collection of functions so that the LiftOver chainfile
only gets opened/passed once and not for every position to be lifted over.
:param chainfile: A string containing the path to the local UCSC .gzipped chainfile
:return:
"""
self.liftover = LiftOver(chainfile)
def liftover_cpra(self, chromosome, position, verbose=False):
"""
Given chromosome, position in 1-based co-ordinates,
This will use pyliftover to liftover a CPRA, will return a (c,p) tuple or raise NonUniqueLiftover if no unique
and strand maintaining liftover is possible
:param chromosome: string with the chromosome as it's represented in the from_genome
:param position: position on chromosome (will be cast to int)
:return: ((str) chromosome, (int) position) or None if no liftover
"""
chromosome = str(chromosome)
position = int(position)
# Perform the liftover lookup, shift the position by 1 as pyliftover deals in 0-based co-ords
new = self.liftover.convert_coordinate(chromosome, position - 1)
# This has to be here as new will be NoneType when the chromosome doesn't exist in the chainfile
if new:
# If the liftover is unique
if len(new) == 1:
# If the liftover hasn't changed strand
if new[0][2] == "+":
# Set the co-ordinates to the lifted-over ones and write out
new_chromosome = str(new[0][0])
# Shift the position forward by one to convert back to a 1-based co-ords
new_position = int(new[0][1]) + 1
return new_chromosome, new_position
else:
exception_string = "{},{} has a flipped strand in liftover: {}".format(chromosome, position, new)
else:
exception_string = "{},{} lifts over to multiple positions: {}".format(chromosome, position, new)
        else:
            # Covers both a chromosome missing from the chain file (new is None)
            # and a position with no mapping at all (new is an empty list).
            exception_string = "{},{} could not be lifted over (chromosome missing from chain file or position unmapped)".format(chromosome, position)
if verbose:
logging.error(exception_string)
return None, None
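# Illustrative usage of the class above (not part of the original module).  The
# chain-file path is an assumption: download e.g. hg19ToHg38.over.chain.gz from
# UCSC before running.
def _liftover_single_position(chainfile="hg19ToHg38.over.chain.gz"):
    ul = UniqueLiftover(chainfile)
    new_chrom, new_pos = ul.liftover_cpra("chr1", 1000000, verbose=True)
    if new_chrom is None:
        return None
    return "{0}:{1}".format(new_chrom, new_pos)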
CM = dict(list(zip([str(x) for x in range(1, 23)],
["chr{0}".format(x) for x in range(1, 23)])) + \
[("X", "chrX"), ("Y", "chrY"), ("MT", "chrM")])
def main():
actions = (
('from23andme', 'convert 23andme file to vcf file'),
('fromimpute2', 'convert impute2 output to vcf file'),
('liftover', 'lift over coordinates in vcf file'),
('location', 'given SNP locations characterize the locations'),
('mstmap', 'convert vcf format to mstmap input'),
('refallele', 'make refAllele file'),
('sample', 'sample subset of vcf file'),
('summary', 'summarize the genotype calls in table'),
('uniq', 'retain only the first entry in vcf file'),
('validate', 'fast validation of vcf file'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def validate(args):
"""
%prog validate input.vcf genome.fasta
Fasta validation of vcf file.
"""
import pyfasta
p = OptionParser(validate.__doc__)
p.add_option("--prefix", help="Add prefix to seqid")
opts, args = p.parse_args(args)
vcffile, fastafile = args
pf = opts.prefix
genome = pyfasta.Fasta(fastafile, record_class=pyfasta.MemoryRecord)
fp = must_open(vcffile)
match_ref = match_alt = total = 0
for row in fp:
if row[0] == '#':
continue
seqid, pos, id, ref, alt = row.split()[:5]
total += 1
if pf:
seqid = pf + seqid
pos = int(pos)
if seqid not in genome:
continue
true_ref = genome[seqid][pos - 1]
if total % 100000 == 0:
print(total, "sites parsed", file=sys.stderr)
if ref == true_ref:
match_ref += 1
elif alt == true_ref:
match_alt += 1
logging.debug("Match REF: {}".format(percentage(match_ref, total)))
logging.debug("Match ALT: {}".format(percentage(match_alt, total)))
def uniq(args):
"""
%prog uniq vcffile
Retain only the first entry in vcf file.
"""
from six.moves.urllib.parse import parse_qs
p = OptionParser(uniq.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
vcffile, = args
fp = must_open(vcffile)
data = []
for row in fp:
if row[0] == '#':
print(row.strip())
continue
v = VcfLine(row)
data.append(v)
for pos, vv in groupby(data, lambda x: x.pos):
vv = list(vv)
if len(vv) == 1:
print(vv[0])
continue
bestv = max(vv, key=lambda x: float(parse_qs(x.info)["R2"][0]))
print(bestv)
def sample(args):
"""
%prog sample vcffile 0.9
Sample subset of vcf file.
"""
from random import random
p = OptionParser(sample.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
vcffile, ratio = args
ratio = float(ratio)
fp = open(vcffile)
pf = vcffile.rsplit(".", 1)[0]
kept = pf + ".kept.vcf"
withheld = pf + ".withheld.vcf"
fwk = open(kept, "w")
fww = open(withheld, "w")
nkept = nwithheld = 0
for row in fp:
if row[0] == '#':
print(row.strip(), file=fwk)
continue
if random() < ratio:
nkept += 1
print(row.strip(), file=fwk)
else:
nwithheld += 1
print(row.strip(), file=fww)
logging.debug("{0} records kept to `{1}`".format(nkept, kept))
logging.debug("{0} records withheld to `{1}`".format(nwithheld, withheld))
def get_vcfstanza(fastafile, fasta, sampleid="SAMP_001"):
from jcvi.formats.base import timestamp
# VCF spec
m = "##fileformat=VCFv4.1\n"
m += "##fileDate={0}\n".format(timestamp())
m += "##source={0}\n".format(__file__)
m += "##reference=file://{0}\n".format(op.abspath(fastafile).strip("/"))
m += '##INFO=<ID=PR,Number=0,Type=Flag,Description="Provisional genotype">\n'
m += '##INFO=<ID=IM,Number=0,Type=Flag,Description="Imputed genotype">\n'
m += '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n'
m += '##FORMAT=<ID=GP,Number=3,Type=Float,Description="Estimated Genotype Probability">\n'
header = "CHROM POS ID REF ALT QUAL FILTER INFO FORMAT\n".split() + [sampleid]
m += "#" + "\t".join(header)
return m
def fromimpute2(args):
"""
%prog fromimpute2 impute2file fastafile 1
Convert impute2 output to vcf file. Imputed file looks like:
--- 1:10177:A:AC 10177 A AC 0.451 0.547 0.002
"""
p = OptionParser(fromimpute2.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
impute2file, fastafile, chr = args
fasta = Fasta(fastafile)
print(get_vcfstanza(fastafile, fasta))
fp = open(impute2file)
seen = set()
for row in fp:
snp_id, rsid, pos, ref, alt, aa, ab, bb = row.split()
pos = int(pos)
if pos in seen:
continue
seen.add(pos)
code = max((float(aa), "0/0"), (float(ab), "0/1"), (float(bb), "1/1"))[-1]
tag = "PR" if snp_id == chr else "IM"
print("\t".join(str(x) for x in \
(chr, pos, rsid, ref, alt, ".", ".", tag, \
"GT:GP", code + ":" + ",".join((aa, ab, bb)))))
def read_rsid(seqid, legend):
if seqid in ["Y", "MT"]:
return {}
# Read rsid
fp = open(legend)
# rs145072688:10352:T:TA
register = {}
for row in fp:
atoms = row.strip().split(":")
if len(atoms) == 4:
rsid, pos, ref, alt = atoms
else:
continue
pos = int(pos)
# Use position for non-rsid
rsids = [pos] if rsid == seqid else [rsid, pos]
for rsid in rsids:
if rsid in register:
pos1, ref1, alt1 = register[rsid]
if alt not in alt1:
register[rsid][-1].append(alt)
else:
register[rsid] = (pos, ref, [alt])
logging.debug("A total of {0} sites imported from `{1}`".\
format(len(register), legend))
return register
def from23andme(args):
"""
%prog from23andme txtfile 1
Convert from23andme file to vcf file.
--ref points to the folder that contains chr1.rsids
$ zcat 1000GP_Phase3/1000GP_Phase3_chr1.legend.gz \\
| cut -d" " -f1 | grep ":" > chr1.rsids
"""
p = OptionParser(from23andme.__doc__)
p.set_ref()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
txtfile, seqid = args
ref_dir = opts.ref
fastafile = op.join(ref_dir, "hs37d5.fa")
fasta = Fasta(fastafile)
pf = txtfile.rsplit(".", 1)[0]
px = CM[seqid]
chrvcf = pf + ".{0}.vcf".format(px)
legend = op.join(ref_dir, "1000GP_Phase3/{0}.rsids".format(px))
register = read_rsid(seqid, legend)
fw = open(chrvcf, "w")
print(get_vcfstanza(fastafile, fasta, txtfile), file=fw)
fp = open(txtfile)
seen = set()
duplicates = skipped = missing = 0
for row in fp:
if row[0] == '#':
continue
rsid, chr, pos, genotype = row.split()
if chr != seqid:
continue
pos = int(pos)
if (chr, pos) in seen:
duplicates += 1
continue
seen.add((chr, pos))
genotype = list(genotype)
if "-" in genotype: # missing daa
missing += 1
continue
# Y or MT
if not register:
assert len(genotype) == 1
ref = fasta[chr][pos - 1].seq.upper()
if "D" in genotype or "I" in genotype:
skipped += 1
continue
genotype = genotype[0]
code = "0/0" if ref == genotype else "1/1"
alt = "." if ref == genotype else genotype
print("\t".join(str(x) for x in \
(chr, pos, rsid, ref, alt, ".", ".", "PR", "GT", code)), file=fw)
continue
# If rsid is seen in the db, use that
if rsid in register:
pos, ref, alt = register[rsid]
elif pos in register:
pos, ref, alt = register[pos]
else:
skipped += 1 # Not in reference panel
continue
assert fasta[chr][pos - 1:pos + len(ref) - 1].seq.upper() == ref
# Keep it bi-allelic
not_seen = [x for x in alt if x not in genotype]
while len(alt) > 1 and not_seen:
alt.remove(not_seen.pop())
if len(alt) > 1:
alt = [alt[0]]
alleles = [ref] + alt
if len(genotype) == 1:
genotype = [genotype[0]] * 2
alt = ",".join(alt) or "."
if "D" in genotype or "I" in genotype:
max_allele = max((len(x), x) for x in alleles)[1]
alleles = [("I" if x == max_allele else "D") for x in alleles]
assert "I" in alleles and "D" in alleles
a, b = genotype
try:
ia, ib = alleles.index(a), alleles.index(b)
except ValueError: # alleles not seen
logging.error("{0}: alleles={1}, genotype={2}".\
format(rsid, alleles, genotype))
skipped += 1
continue
code = "/".join(str(x) for x in sorted((ia, ib)))
print("\t".join(str(x) for x in \
(chr, pos, rsid, ref, alt, ".", ".", "PR", "GT", code)), file=fw)
logging.debug("duplicates={0} skipped={1} missing={2}".\
format(duplicates, skipped, missing))
def refallele(args):
"""
%prog refallele vcffile > out.refAllele
Make refAllele file which can be used to convert PLINK file to VCF file.
"""
p = OptionParser(refallele.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
vcffile, = args
fp = open(vcffile)
for row in fp:
if row[0] == '#':
continue
atoms = row.split()
marker = "{0}:{1}".format(*atoms[:2])
ref = atoms[3]
print("\t".join((marker, ref)))
def location(args):
"""
%prog location bedfile fastafile
Given SNP locations, summarize the locations in the sequences. For example,
find out if there are more 3`-SNPs than 5`-SNPs.
"""
from jcvi.formats.bed import BedLine
from jcvi.graphics.histogram import stem_leaf_plot
p = OptionParser(location.__doc__)
p.add_option("--dist", default=100, type="int",
help="Distance cutoff to call 5` and 3` [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
dist = opts.dist
sizes = Sizes(fastafile).mapping
fp = open(bedfile)
fiveprime = threeprime = total = 0
percentages = []
for row in fp:
b = BedLine(row)
pos = b.start
size = sizes[b.seqid]
if pos < dist:
fiveprime += 1
if size - pos < dist:
threeprime += 1
total += 1
percentages.append(100 * pos / size)
m = "Five prime (within {0}bp of start codon): {1}\n".format(dist, fiveprime)
m += "Three prime (within {0}bp of stop codon): {1}\n".format(dist, threeprime)
m += "Total: {0}".format(total)
print(m, file=sys.stderr)
bins = 10
title = "Locations within the gene [0=Five-prime, 100=Three-prime]"
stem_leaf_plot(percentages, 0, 100, bins, title=title)
def summary(args):
"""
%prog summary txtfile fastafile
The txtfile can be generated by: %prog mstmap --noheader --freq=0
Tabulate on all possible combinations of genotypes and provide results
in a nicely-formatted table. Give a fastafile for SNP rate (average
# of SNPs per Kb).
Only three-column file is supported:
locus_id intra- genotype inter- genotype
"""
from jcvi.utils.cbook import thousands
from jcvi.utils.table import tabulate
p = OptionParser(summary.__doc__)
p.add_option("--counts",
help="Print SNP counts in a txt file [default: %default]")
p.add_option("--bed",
help="Print SNPs locations in a bed file [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
txtfile, fastafile = args
bedfw = open(opts.bed, "w") if opts.bed else None
fp = open(txtfile)
    header = next(fp).split() # Header
snps = defaultdict(list) # contig => list of loci
combinations = defaultdict(int)
intraSNPs = interSNPs = 0
distinctSet = set() # set of genes that show A-B pattern
ref, alt = header[1:3]
snpcounts, goodsnpcounts = defaultdict(int), defaultdict(int)
for row in fp:
atoms = row.split()
assert len(atoms) == 3, \
"Only three-column file is supported"
locus, intra, inter = atoms
ctg, pos = locus.rsplit(".", 1)
pos = int(pos)
snps[ctg].append(pos)
snpcounts[ctg] += 1
if intra == 'X':
intraSNPs += 1
if inter in ('B', 'X'):
interSNPs += 1
if intra == 'A' and inter == 'B':
distinctSet.add(ctg)
goodsnpcounts[ctg] += 1
# Tabulate all possible combinations
intra = ref + "-" + intra
inter = alt + "-" + inter
combinations[(intra, inter)] += 1
if bedfw:
print("\t".join(str(x) for x in \
(ctg, pos - 1, pos, locus)), file=bedfw)
if bedfw:
logging.debug("SNP locations written to `{0}`.".format(opts.bed))
bedfw.close()
nsites = sum(len(x) for x in snps.values())
sizes = Sizes(fastafile)
bpsize = sizes.totalsize
snprate = lambda a: a * 1000. / bpsize
m = "Dataset `{0}` contains {1} contigs ({2} bp).\n".\
format(fastafile, len(sizes), thousands(bpsize))
m += "A total of {0} SNPs within {1} contigs ({2} bp).\n".\
format(nsites, len(snps),
thousands(sum(sizes.mapping[x] for x in snps.keys())))
m += "SNP rate: {0:.1f}/Kb, ".format(snprate(nsites))
m += "IntraSNPs: {0} ({1:.1f}/Kb), InterSNPs: {2} ({3:.1f}/Kb)".\
format(intraSNPs, snprate(intraSNPs), interSNPs, snprate(interSNPs))
print(m, file=sys.stderr)
print(tabulate(combinations), file=sys.stderr)
leg = "Legend: A - homozygous same, B - homozygous different, X - heterozygous"
print(leg, file=sys.stderr)
tag = (ref + "-A", alt + "-B")
distinctSNPs = combinations[tag]
tag = str(tag).replace("'", "")
print("A total of {0} disparate {1} SNPs in {2} contigs.".\
format(distinctSNPs, tag, len(distinctSet)), file=sys.stderr)
if not opts.counts:
return
snpcountsfile = opts.counts
fw = open(snpcountsfile, "w")
header = "\t".join(("Contig", "#_SNPs", "#_AB_SNP"))
print(header, file=fw)
assert sum(snpcounts.values()) == nsites
assert sum(goodsnpcounts.values()) == distinctSNPs
for ctg in sorted(snps.keys()):
snpcount = snpcounts[ctg]
goodsnpcount = goodsnpcounts[ctg]
print("\t".join(str(x) for x in (ctg, snpcount, goodsnpcount)), file=fw)
fw.close()
logging.debug("SNP counts per contig is written to `{0}`.".\
format(snpcountsfile))
g2x = {"0/0": 'A', "0/1": 'X', "1/1": 'B', "./.": '-', ".": '-'}
def encode_genotype(s, mindepth=3, depth_index=2, nohet=False):
"""
>>> encode_genotype("1/1:128,18,0:6:18") # homozygote B
'B'
>>> encode_genotype("0/1:0,0,0:0:3") # missing data
'-'
>>> encode_genotype("0/1:128,0,26:7:22") # heterozygous A/B
'X'
"""
atoms = s.split(":")
if len(atoms) < 3:
return g2x[atoms[0]]
inferred = atoms[0]
depth = int(atoms[depth_index])
if depth < mindepth:
return '-'
if inferred == '0/0':
return 'A'
if inferred == '0/1':
return '-' if nohet else 'X'
if inferred == '1/1':
return 'B'
return '-'
def mstmap(args):
"""
%prog mstmap bcffile/vcffile > matrixfile
Convert bcf/vcf format to mstmap input.
"""
from jcvi.assembly.geneticmap import MSTMatrix
p = OptionParser(mstmap.__doc__)
p.add_option("--dh", default=False, action="store_true",
help="Double haploid population, no het [default: %default]")
p.add_option("--freq", default=.2, type="float",
help="Allele must be above frequency [default: %default]")
p.add_option("--mindepth", default=3, type="int",
help="Only trust genotype calls with depth [default: %default]")
p.add_option("--missing_threshold", default=.25, type="float",
help="Fraction missing must be below")
p.add_option("--noheader", default=False, action="store_true",
help="Do not print MSTmap run parameters [default: %default]")
p.add_option("--pv4", default=False, action="store_true",
help="Enable filtering strand-bias, tail distance bias, etc. "
"[default: %default]")
p.add_option("--freebayes", default=False, action="store_true",
help="VCF output from freebayes")
p.set_sep(sep=".", help="Use separator to simplify individual names")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
vcffile, = args
if vcffile.endswith(".bcf"):
bcffile = vcffile
vcffile = bcffile.rsplit(".", 1)[0] + ".vcf"
cmd = "bcftools view {0}".format(bcffile)
cmd += " | vcfutils.pl varFilter"
if not opts.pv4:
cmd += " -1 0 -2 0 -3 0 -4 0 -e 0"
if need_update(bcffile, vcffile):
sh(cmd, outfile=vcffile)
freq = opts.freq
sep = opts.sep
depth_index = 1 if opts.freebayes else 2
ptype = "DH" if opts.dh else "RIL6"
nohet = ptype == "DH"
fp = open(vcffile)
genotypes = []
for row in fp:
if row[:2] == "##":
continue
atoms = row.split()
if row[0] == '#':
ind = [x.split(sep)[0] for x in atoms[9:]]
nind = len(ind)
mh = ["locus_name"] + ind
continue
marker = "{0}.{1}".format(*atoms[:2])
geno = atoms[9:]
geno = [encode_genotype(x, mindepth=opts.mindepth,
depth_index=depth_index,
nohet=nohet) for x in geno]
assert len(geno) == nind
f = 1. / nind
if geno.count("A") * f < freq:
continue
if geno.count("B") * f < freq:
continue
if geno.count("-") * f > opts.missing_threshold:
continue
genotype = [marker] + geno
genotypes.append(genotype)
mm = MSTMatrix(genotypes, mh, ptype, opts.missing_threshold)
mm.write(opts.outfile, header=(not opts.noheader))
def liftover(args):
"""
%prog liftover old.vcf hg19ToHg38.over.chain.gz new.vcf
Lift over coordinates in vcf file.
"""
p = OptionParser(liftover.__doc__)
p.add_option("--newid", default=False, action="store_true",
help="Make new identifiers")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
oldvcf, chainfile, newvcf = args
ul = UniqueLiftover(chainfile)
num_excluded = 0
fp = open(oldvcf)
fw = open(newvcf, "w")
for row in fp:
row = row.strip()
if row[0] == '#':
if row.startswith("##source="):
row = "##source={0}".format(__file__)
elif row.startswith("##reference="):
row = "##reference=hg38"
elif row.startswith("##contig="):
continue
print(row.strip(), file=fw)
continue
v = VcfLine(row)
# GRCh37.p2 has the same MT sequence as hg38 (but hg19 is different)
if v.seqid == "MT":
v.seqid = "chrM"
print(v, file=fw)
continue
try:
new_chrom, new_pos = ul.liftover_cpra(CM[v.seqid], v.pos)
        except Exception:
            num_excluded += 1
continue
        if new_chrom is not None and new_pos is not None:
            v.seqid, v.pos = new_chrom, new_pos
            if opts.newid:
                v.rsid = "{0}:{1}".format(new_chrom.replace("chr", ""), new_pos)
            print(v, file=fw)
        else:
            num_excluded += 1
logging.debug("Excluded {0}".format(num_excluded))
if __name__ == '__main__':
main()
| 32.36801
| 118
| 0.563175
|
d895fabe825603a86c6accc1659113141f6bb4d0
| 2,940
|
py
|
Python
|
tests/mac/models/test_henke_winxray.py
|
drix00/pyxraymac
|
f9e2c4e073ff1f5d9fbfaa58b3b66c041433896a
|
[
"Apache-2.0"
] | null | null | null |
tests/mac/models/test_henke_winxray.py
|
drix00/pyxraymac
|
f9e2c4e073ff1f5d9fbfaa58b3b66c041433896a
|
[
"Apache-2.0"
] | null | null | null |
tests/mac/models/test_henke_winxray.py
|
drix00/pyxraymac
|
f9e2c4e073ff1f5d9fbfaa58b3b66c041433896a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: tests.mac.models.test_henke_winxray
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
Tests for the :py:mod:`xray.mac.models.henke_winxray` module.
"""
###############################################################################
# Copyright 2021 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
import unittest
import os.path
# Third party modules.
import pytest
# Local modules.
# Project modules.
from xray.mac.models.henke_winxray import MacHenkeWinxray
from xray.mac import get_current_module_path
# Globals and constants variables.
class TestMacHenkeWinxray(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
data_path = get_current_module_path(__file__, "../../../data/henke1993/winxray")
if not os.path.isdir(data_path): # pragma: no cover
pytest.skip("Data path file not found: {}".format(data_path))
self.macData = MacHenkeWinxray(data_path)
def tearDown(self):
unittest.TestCase.tearDown(self)
def test_skeleton(self):
# self.fail("Test if the TestCase is working.")
self.assertTrue(True)
def test_constructor(self):
data_path = get_current_module_path(__file__, "../../../data/henke1993/winxray")
mac_data = MacHenkeWinxray(data_path)
self.assertTrue(mac_data.pathnameBinary != "")
self.assertTrue(mac_data.pathnameText != "")
def test_read_text_data(self):
        energies_eV, mac_cm2_g = self.macData.read_text_data(28)  # noqa
        self.assertEqual(500, len(energies_eV))
        self.assertEqual(500, len(mac_cm2_g))
        self.assertEqual(10.0, energies_eV[0])
        self.assertEqual(98739.2, mac_cm2_g[0])
        self.assertEqual(30000.0, energies_eV[-1])
        self.assertEqual(9.77398, mac_cm2_g[-1])
    def test_read_binary_data(self):
        energies_eV, mac_cm2_g = self.macData.read_binary_data(28)  # noqa
        self.assertEqual(500, len(energies_eV))
        self.assertEqual(500, len(mac_cm2_g))
        self.assertAlmostEqual(10.0, energies_eV[0], 1)
        self.assertAlmostEqual(98739.2, mac_cm2_g[0], 1)
        self.assertAlmostEqual(30000.0, energies_eV[-1], 1)
        self.assertAlmostEqual(9.77398, mac_cm2_g[-1], 1)
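# Usage sketch (not part of the original test module).  The relative data path
# mirrors the fixture above and is an assumption about the local checkout.
def _read_nickel_mac_example():
    data_path = get_current_module_path(__file__, "../../../data/henke1993/winxray")
    mac_data = MacHenkeWinxray(data_path)
    energies_eV, mac_cm2_g = mac_data.read_text_data(28)  # Z=28: nickel
    return energies_eV[0], mac_cm2_g[0]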
| 29.69697
| 88
| 0.660204
|
15971d62b823e11d2f9734ca3d3a8b732a0a65a9
| 4,825
|
py
|
Python
|
src/python/components/left_panel.py
|
UBC-MDS/DSCI_532_Group_12
|
135b8544c19b7c90128d2241d7425f14beeb314a
|
[
"MIT"
] | 1
|
2021-02-21T09:46:24.000Z
|
2021-02-21T09:46:24.000Z
|
src/python/components/left_panel.py
|
lephanthuymai/DSCI_532_Group_12
|
de6fe6ebcb0ceccc2eb2c0aefb057fd485c4324e
|
[
"MIT"
] | 44
|
2021-01-13T03:03:06.000Z
|
2021-02-13T20:19:24.000Z
|
src/python/components/left_panel.py
|
lephanthuymai/DSCI_532_Group_12
|
de6fe6ebcb0ceccc2eb2c0aefb057fd485c4324e
|
[
"MIT"
] | 12
|
2021-01-14T03:24:23.000Z
|
2021-09-28T12:37:24.000Z
|
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
import locale
import altair as alt
from panel import panel
from decimal import *
locale.setlocale(locale.LC_ALL, "")
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import data_model as dm
class left_panel(panel):
"""handle all activities of the left panel"""
def __init__(self, datamodel):
super().__init__("Global", datamodel)
self.content = dbc.Col(
[
dbc.Row(dbc.Col(self.__create_total_statistics())),
dbc.Row(dbc.Col(self.__create_button_groups())),
dbc.Row(
dbc.Col(
[
html.Iframe(
id="chart_cases_ranking",
style={
"border-width": "0",
"width": "450px",
"height": "800px",
},
)
]
),
),
],
width=12,
)
def refresh(self, chart_type="confirmed"):
"""Aggregate the country level data
Args:
chart_type (string): selected chart type from buttons
Returns:
a ranking bar chart of confirmed cases/death cases/recovered cases
"""
result = self.data_reader.get_aggregated_daily_report()
confirmed_data = result[["Country_Region", "Confirmed"]]
active_data = result[["Country_Region", "Active"]]
recovered_data = result[["Country_Region", "Recovered"]]
deaths_data = result[["Country_Region", "Deaths"]]
if chart_type == "confirmed":
data = confirmed_data
elif chart_type == "active":
data = active_data
elif chart_type == "recovered":
data = recovered_data
elif chart_type == "death":
data = deaths_data
data.columns = ["Country_Region", "Cases"]
chart = self.__create_ranking_bar_chart(
data.nlargest(30, "Cases"), chart_type.title()
)
return chart
def __create_button_groups(self):
"""create buttons Confirmed/Death/Recovered/Active
Returns:
dbc.ButtonGroup
"""
button_groups = dbc.ButtonGroup(
[
dbc.Button("Confirmed", active=True, id="btn_confirmed"),
dbc.Button("Active", id="btn_active"),
dbc.Button("Death", id="btn_death"),
dbc.Button("Recovered", id="btn_recovered"),
],
size="md",
className="mr-1",
)
return button_groups
def __create_total_statistics(self):
"""retrieve global statistics
Returns:
html: all statistics
"""
data = self.data_reader.cumulative_filter()
confirmed_cases = panel.format_number(data.Confirmed)
active_cases = panel.format_number(data.Active)
deaths = panel.format_number(data.Deaths)
content = dbc.Container(
[
dbc.Row(dbc.Col(html.H5("Total Confirmed Cases: " + confirmed_cases))),
dbc.Row(
dbc.Col(html.H5("Total Deaths: " + deaths, style={"color": "red"}))
),
dbc.Row(dbc.Col(html.H5("Total Active Cases: " + active_cases))),
]
)
return content
def __create_ranking_bar_chart(self, data, type):
"""create bar chart to rank countries by case type
Args:
data (dataframe): dataset
type (string): "confirmed", "death", "recovered
Returns:
altair barchart
"""
chart = (
alt.Chart(
data,
title=alt.TitleParams(
text="Top 30 Countries", subtitle="By " + type + " Cases"
),
width=290,
)
.mark_bar()
.encode(
x=alt.X("Cases", title=" ", axis=alt.Axis(labels=False)),
y=alt.Y("Country_Region", sort="-x", title=" "),
color=alt.Color("Cases", scale=alt.Scale(scheme="orangered")),
tooltip=alt.Tooltip(["Cases:Q"], format=",.0f"),
)
.configure_axis(grid=False)
.configure_title(anchor="start")
.configure_legend(orient="bottom")
)
return chart.to_html()
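# --- Illustrative sketch (not part of the original panel module) -------------
# A standalone version of the ranking-bar-chart pattern used above; the demo
# data frame is made up, whereas the real panel pulls data from data_model.
def _ranking_chart_demo():
    import pandas as pd
    demo = pd.DataFrame(
        {"Country_Region": ["A", "B", "C"], "Cases": [300, 200, 100]}
    )
    chart = (
        alt.Chart(demo, title="Top 3 (demo)", width=290)
        .mark_bar()
        .encode(
            x=alt.X("Cases", title=" ", axis=alt.Axis(labels=False)),
            y=alt.Y("Country_Region", sort="-x", title=" "),
            color=alt.Color("Cases", scale=alt.Scale(scheme="orangered")),
            tooltip=alt.Tooltip(["Cases:Q"], format=",.0f"),
        )
        .configure_axis(grid=False)
    )
    return chart.to_html()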
| 32.166667
| 87
| 0.521865
|
d3e9dbba139f17e1b43489435dfd6ee16ffe799d
| 12,317
|
py
|
Python
|
scripts/merge_tract_cat.py
|
LSSTDESC/SSim_DC2
|
a6ff636ea6f779edc7ddf7b91449336e10e14b4a
|
[
"BSD-3-Clause"
] | 6
|
2018-08-02T18:29:14.000Z
|
2020-08-05T04:00:01.000Z
|
scripts/merge_tract_cat.py
|
LSSTDESC/SSim_DC2
|
a6ff636ea6f779edc7ddf7b91449336e10e14b4a
|
[
"BSD-3-Clause"
] | 232
|
2018-07-30T19:05:24.000Z
|
2022-02-24T06:14:42.000Z
|
scripts/merge_tract_cat.py
|
LSSTDESC/SSim_DC2
|
a6ff636ea6f779edc7ddf7b91449336e10e14b4a
|
[
"BSD-3-Clause"
] | 3
|
2018-10-04T11:52:52.000Z
|
2019-05-25T04:28:31.000Z
|
import os
import re
import sys
import numpy as np
import pandas as pd
from lsst.daf.persistence import Butler
from lsst.daf.persistence.butlerExceptions import NoResults
def valid_identifier_name(name):
"""Return a valid Python identifier name from input string.
For now just strips commas and spaces
The full regex to satisfy is
good_identifier_name = "^[a-zA-Z_][a-zA-Z0-9_]*$"
But that doesn't define the prescription for creating a good one.
>>> valid_identifier_name('coadd_4849_1,1')
'coadd_4849_11'
>>> valid_identifier_name('coadd_4849_1,1_2,3')
'coadd_4849_11_23'
>>> valid_identifier_name('coadd_48^49_1,1_2 3;')
'coadd_4849_11_23'
>>> valid_identifier_name('2234coadd_48^49_1,1_2 3;')
'coadd_4849_11_23'
>>> valid_identifier_name('2234,48^49 3;')
''
"""
remove_characters_regex = '[^a-zA-Z0-9_]'
name = re.sub(remove_characters_regex, '', name)
# Remove beginning characters that are numbers
name = re.sub('^[0-9]*', '', name)
return name
def load_and_save_tract(repo, tract, filename, key_prefix='coadd', patches=None,
overwrite=True, verbose=False, **kwargs):
"""Save catalogs to HDF5 from forced-photometry coadds across available filters.
Iterates through patches, saving each in append mode to the save HDF5 file.
Parameters
--
repo: str
File location of Butler repository+rerun to load.
tract: int
Tract of sky region to load
filename: str
Filename for HDF file.
key_prefix: str
Base for the key in the HDF file.
Keys will be of the form "%s_%d_%s" % (keybase, tract, patch)
With the addition that the comma will be removed from the patch name
to provide a valid Python identifier: e.g., 'coadd_4849_11'
overwrite: bool
Overwrite an existing HDF file.
"""
butler = Butler(repo)
if patches is None:
# Extract the patches for this tract from the skymap
skymap = butler.get(datasetType='deepCoadd_skyMap')
patches = ['%d,%d' % patch.getIndex() for patch in skymap[tract]]
for patch in patches:
if verbose:
print("Processing tract %d, patch %s" % (tract, patch))
patch_merged_cat = load_patch(butler, tract, patch, verbose=verbose, **kwargs)
if len(patch_merged_cat) == 0:
if verbose:
print(" No good entries for tract %d, patch %s" % (tract, patch))
continue
key = '%s_%d_%s' % (key_prefix, tract, patch)
key = valid_identifier_name(key)
patch_merged_cat.to_hdf(filename, key, format='fixed')
def load_tract(repo, tract, patches=None, **kwargs):
"""Merge catalogs from forced-photometry coadds across available filters.
Parameters
--
tract: int
Tract of sky region to load
repo: str
File location of Butler repository+rerun to load.
patches: list of str
List of patches. If not specified, will default to '0,0'--'7,7'.
Returns
--
Pandas DataFrame of merged catalog
"""
butler = Butler(repo)
if patches is None:
# Extract the patches for this tract from the skymap
skymap = butler.get(datasetType='deepCoadd_skyMap')
patches = ['%d,%d' % patch.getIndex() for patch in skymap[tract]]
merged_tract_cat = pd.DataFrame()
for patch in patches:
this_patch_merged_cat = load_patch(butler, tract, patch, **kwargs)
        # DataFrame.append returns a new frame, so keep the result
        merged_tract_cat = merged_tract_cat.append(this_patch_merged_cat)
return merged_tract_cat
def load_patch(butler_or_repo, tract, patch,
fields_to_join=('id',),
filters={'u': 'u', 'g': 'g', 'r': 'r', 'i': 'i', 'z': 'z', 'y': 'y'},
trim_colnames_for_fits=False,
verbose=False,
debug=False
):
"""Load patch catalogs. Return merged catalog across filters.
butler_or_repo: Butler object or str
Either a Butler object or a filename to the repo
tract: int
Tract in skymap
patch: str
Patch in the tract in the skymap
fields_to_join: iterable of str
Join the catalogs for each filter on these fields
filters: iterable of str
Filter names to load
trim_colnames_for_fits: bool
Trim column names to satisfy the FITS standard character limit of <68.
Returns
--
Pandas DataFrame of patch catalog merged across filters.
"""
if isinstance(butler_or_repo, str):
butler = Butler(butler_or_repo)
else:
butler = butler_or_repo
# Define the filters and order in which to sort them.:
tract_patch_data_id = {'tract': tract, 'patch': patch}
try:
ref_table = butler.get(datasetType='deepCoadd_ref',
dataId=tract_patch_data_id)
ref_table = ref_table.asAstropy().to_pandas()
except NoResults as e:
if verbose:
print(" ", e)
return pd.DataFrame()
isPrimary = ref_table['detect_isPrimary']
ref_table = ref_table[isPrimary]
if len(ref_table) == 0:
if verbose:
print(" No good isPrimary entries for tract %d, patch %s" % (tract, patch))
return ref_table
flux_field_names_per_schema_version = {
1: {'psf_flux': 'base_PsfFlux_flux', 'psf_flux_err': 'base_PsfFlux_fluxSigma',
'modelfit_flux': 'modelfit_CModel_flux', 'modelfit_flux_err': 'modelfit_CModel_fluxSigma'},
2: {'psf_flux': 'base_PsfFlux_flux', 'psf_flux_err': 'base_PsfFlux_fluxErr',
'modelfit_flux': 'modelfit_CModel_flux', 'modelfit_flux_err': 'modelfit_CModel_fluxErr'},
3: {'psf_flux': 'base_PsfFlux_instFlux', 'psf_flux_err': 'base_PsfFlux_instFluxErr',
'modelfit_flux': 'modelfit_CModel_instFlux', 'modelfit_flux_err': 'modelfit_CModel_instFluxErr'},
}
merge_filter_cats = {}
for filt in filters:
this_data = tract_patch_data_id.copy()
this_data['filter'] = filters[filt]
try:
cat = butler.get(datasetType='deepCoadd_forced_src',
dataId=this_data)
except NoResults as e:
if verbose:
print(" ", e)
continue
if debug:
print("AFW photometry catalog schema version: {}".format(cat.schema.VERSION))
flux_names = flux_field_names_per_schema_version[cat.schema.VERSION]
# Convert the AFW table to an AstroPy table
# because it's much easier to add column to an AstroPy table
# than it is to set up a new schema for an AFW table.
# cat = cat.asAstropy()
# Try instead out converting the AFW->AstroPy->Pandas per cat
# hoping to avoid memory copy
# Then join in memory space.
cat = cat.asAstropy().to_pandas()
calib = butler.get('deepCoadd_calexp_photoCalib', this_data)
calib.setThrowOnNegativeFlux(False)
mag, mag_err = calib.getMagnitude(cat[flux_names['psf_flux']].values, cat[flux_names['psf_flux_err']].values)
cat['mag'] = mag
cat['mag_err'] = mag_err
cat['SNR'] = np.abs(cat[flux_names['psf_flux']] /
cat[flux_names['psf_flux_err']])
modelfit_mag, modelfit_mag_err = calib.getMagnitude(cat[flux_names['modelfit_flux']].values,
cat[flux_names['modelfit_flux_err']].values)
cat['modelfit_mag'] = modelfit_mag
cat['modelfit_mag_err'] = modelfit_mag_err
cat['modelfit_SNR'] = np.abs(cat[flux_names['modelfit_flux']] /
cat[flux_names['modelfit_flux_err']])
cat = cat[isPrimary]
merge_filter_cats[filt] = cat
merged_patch_cat = ref_table
for filt in filters:
if filt not in merge_filter_cats:
continue
cat = merge_filter_cats[filt]
if len(cat) < 1:
continue
# Rename duplicate columns with prefix of filter
prefix_columns(cat, filt, fields_to_skip=fields_to_join)
# Merge metadata with concatenation
merged_patch_cat = pd.merge(merged_patch_cat, cat,
on=fields_to_join,
sort=False)
if trim_colnames_for_fits:
# FITS column names can't be longer that 68 characters
# Trim here to ensure consistency across any format we write this out to
trim_long_colnames(merged_patch_cat)
return merged_patch_cat
def trim_long_colnames(cat):
"""Trim long column names in an AstroPy Table by specific replacements.
Intended to help trim down column names to satisfy the FITS standard limit
of 68 characters.
Operates on 'cat' in place.
Parameters
--
cat: AstroPy Table
"""
import re
long_short_pairs = [
('GeneralShapeletPsf', 'GSPsf'),
('DoubleShapelet', 'DS'),
('noSecondDerivative', 'NoSecDer')]
for long, short in long_short_pairs:
long_re = re.compile(long)
for col_name in cat.colnames:
if long_re.search(col_name):
new_col_name = long_re.sub(short, col_name)
cat.rename_column(col_name, new_col_name)
def prefix_columns(cat, filt, fields_to_skip=()):
"""Prefix the columns of an Pandas DataFrame with the filter name.
>>> import pandas as pd
>>> tab = pd.DataFrame({'letter': ['a', 'b'], 'number': [1, 2]})
>>> prefix_columns(tab, 'filter')
>>> print(tab)
filter_letter filter_number
------------- -------------
a 1
b 2
"""
old_colnames = list(cat.columns)
for field in fields_to_skip:
field_idx = old_colnames.index(field)
old_colnames.pop(field_idx)
transformation = {col: '%s_%s' % (filt, col) for col in old_colnames}
cat.rename(index=str, columns=transformation, inplace=True)
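# Hedged usage sketch (toy data, not DC2 output): columns named in fields_to_skip keep
# their original names so they can serve as the join keys used in the per-filter merge.
#   import pandas as pd
#   df = pd.DataFrame({'id': [1, 2], 'mag': [20.1, 21.3]})
#   prefix_columns(df, 'r', fields_to_skip=('id',))
#   list(df.columns)  # -> ['id', 'r_mag']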
if __name__ == '__main__':
from argparse import ArgumentParser, RawTextHelpFormatter
usage = """
Generate merged static-sky photometry (based on deepCoadd forced photometry)
Note that the following defines the tracts for the DC2 Run 1.1p processing.
DC2_tracts = {}
DC2_tracts['Run1.1p'] = (5066, 5065, 5064, 5063, 5062,
4852, 4851, 4850, 4849, 4848,
4640, 4639, 4638, 4637, 4636,
4433, 4432, 4431, 4430, 4429,)
"""
parser = ArgumentParser(description=usage,
formatter_class=RawTextHelpFormatter)
parser.add_argument('repo', type=str,
help='Filepath to LSST DM Stack Butler repository.')
parser.add_argument('tract', type=int, nargs='+',
help='Skymap tract[s] to process.')
parser.add_argument('--patches', nargs='+',
help='''
Skymap patch[es] within each tract to process.
A common use-case for this option is quick testing.
''')
parser.add_argument('--name', default='object',
help='Base name of files: <name>_tract_5062.hdf5')
parser.add_argument('--output_dir', default='./',
help='Output directory. (default: %(default)s)')
parser.add_argument('--verbose', dest='verbose', default=True,
action='store_true', help='Verbose mode.')
parser.add_argument('--silent', dest='verbose', action='store_false',
help='Turn off verbosity.')
parser.add_argument('--hsc', dest='hsc', action='store_true',
help='Uses HSC filters')
args = parser.parse_args(sys.argv[1:])
if args.hsc:
filters = {'u': 'HSC-U', 'g': 'HSC-G', 'r': 'HSC-R', 'i': 'HSC-I',
'z': 'HSC-Z', 'y': 'HSC-Y'}
else:
filters = {'u': 'u', 'g': 'g', 'r': 'r', 'i': 'i', 'z': 'z', 'y': 'y'}
for tract in args.tract:
filebase = '{:s}_tract_{:d}'.format(args.name, tract)
filename = os.path.join(args.output_dir, filebase + '.hdf5')
load_and_save_tract(args.repo, tract, filename,
patches=args.patches, verbose=args.verbose,
filters=filters)
| 36.657738
| 117
| 0.610701
|
8c47e28d244db64aa55f7d7324f5a81a24f70942
| 189
|
py
|
Python
|
app/v1/messages.py
|
speduardo/flask-boilerplate
|
d50d8d0f15a08c4905a2029b0ae9637489624c9a
|
[
"MIT"
] | 1
|
2020-05-26T01:53:58.000Z
|
2020-05-26T01:53:58.000Z
|
app/v1/messages.py
|
speduardo/flask-boilerplate
|
d50d8d0f15a08c4905a2029b0ae9637489624c9a
|
[
"MIT"
] | null | null | null |
app/v1/messages.py
|
speduardo/flask-boilerplate
|
d50d8d0f15a08c4905a2029b0ae9637489624c9a
|
[
"MIT"
] | null | null | null |
MSG_TOKEN_CREATED = 'Token criado.'  # Portuguese: "Token created."
MSG_INVALID_CREDENTIALS = 'As credenciais estão inválidas para log in.'  # "The credentials are invalid for login."
MSG_TOKEN_EXPIRED = 'Token expirou.'  # "Token expired."
MSG_PERMISSION_DENIED = 'Permissão negada.'  # "Permission denied."
| 37.8
| 71
| 0.804233
|
d7a64608b886a6e985e00b6ab207c9d259e3633a
| 44,287
|
py
|
Python
|
tests/auth_tests/test_views.py
|
alvra/django
|
4a7b58210defea33a428b748ccbc97ae8fd49838
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/auth_tests/test_views.py
|
alvra/django
|
4a7b58210defea33a428b748ccbc97ae8fd49838
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/auth_tests/test_views.py
|
alvra/django
|
4a7b58210defea33a428b748ccbc97ae8fd49838
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2016-09-02T01:22:19.000Z
|
2016-09-02T01:22:19.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import itertools
import os
import re
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.contrib.auth import REDIRECT_FIELD_NAME, SESSION_KEY
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, SetPasswordForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.auth.views import login as login_view, redirect_to_login
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sites.requests import RequestSite
from django.core import mail
from django.core.urlresolvers import NoReverseMatch, reverse, reverse_lazy
from django.db import connection
from django.http import HttpRequest, QueryDict
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.test import TestCase, override_settings
from django.test.utils import patch_logger
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils.six.moves.urllib.parse import ParseResult, urlparse
from django.utils.translation import LANGUAGE_SESSION_KEY
from .models import UUIDUser
from .settings import AUTH_TEMPLATES
@override_settings(
LANGUAGES=[
('en', 'English'),
],
LANGUAGE_CODE='en',
TEMPLATES=AUTH_TEMPLATES,
USE_TZ=False,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='auth_tests.urls',
)
class AuthViewsTestCase(TestCase):
"""
    Helper base class for all the following test cases.
"""
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
first_name='Test', last_name='Client', email='testclient@example.com', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u2 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='inactive',
first_name='Inactive', last_name='User', email='testclient2@example.com', is_staff=False, is_active=False,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u3 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
first_name='Staff', last_name='Member', email='staffmember@example.com', is_staff=True, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u4 = User.objects.create(
password='', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='empty_password', first_name='Empty', last_name='Password', email='empty_password@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u5 = User.objects.create(
password='$', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='unmanageable_password', first_name='Unmanageable', last_name='Password',
email='unmanageable_password@example.com', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u6 = User.objects.create(
password='foo$bar', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='unknown_password', first_name='Unknown', last_name='Password',
email='unknown_password@example.com', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
def login(self, username='testclient', password='password'):
response = self.client.post('/login/', {
'username': username,
'password': password,
})
self.assertIn(SESSION_KEY, self.client.session)
return response
def logout(self):
response = self.client.get('/admin/logout/')
self.assertEqual(response.status_code, 200)
self.assertNotIn(SESSION_KEY, self.client.session)
def assertFormError(self, response, error):
"""Assert that error is found in response.context['form'] errors"""
form_errors = list(itertools.chain(*response.context['form'].errors.values()))
self.assertIn(force_text(error), form_errors)
def assertURLEqual(self, url, expected, parse_qs=False):
"""
Given two URLs, make sure all their components (the ones given by
urlparse) are equal, only comparing components that are present in both
URLs.
If `parse_qs` is True, then the querystrings are parsed with QueryDict.
This is useful if you don't want the order of parameters to matter.
Otherwise, the query strings are compared as-is.
"""
fields = ParseResult._fields
for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
if parse_qs and attr == 'query':
x, y = QueryDict(x), QueryDict(y)
if x and y and x != y:
self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
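    # Hedged illustration (not part of Django's test suite): with parse_qs=True the order
    # of query-string parameters is ignored, so a call such as
    #   self.assertURLEqual('/login/?next=/a/&x=1', '/login/?x=1&next=/a/', parse_qs=True)
    # passes, while the same comparison without parse_qs fails on the raw query string.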
@override_settings(ROOT_URLCONF='django.contrib.auth.urls')
class AuthViewNamedURLTests(AuthViewsTestCase):
def test_named_urls(self):
"Named URLs should be reversible"
expected_named_urls = [
('login', [], {}),
('logout', [], {}),
('password_change', [], {}),
('password_change_done', [], {}),
('password_reset', [], {}),
('password_reset_done', [], {}),
('password_reset_confirm', [], {
'uidb64': 'aaaaaaa',
'token': '1111-aaaaa',
}),
('password_reset_complete', [], {}),
]
for name, args, kwargs in expected_named_urls:
try:
reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
class PasswordResetTest(AuthViewsTestCase):
def test_email_not_found(self):
"""If the provided email is not registered, don't raise any error but
also don't send any email."""
response = self.client.get('/password_reset/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("http://", mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# optional multipart text/html email has been added. Make sure original,
# default functionality is 100% the same
self.assertFalse(mail.outbox[0].message().is_multipart())
def test_extra_email_context(self):
"""
extra_email_context should be available in the email template context.
"""
response = self.client.post(
'/password_reset_extra_email_context/',
{'email': 'staffmember@example.com'},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Email email context: "Hello!"', mail.outbox[0].body)
def test_html_mail_template(self):
"""
A multipart email with text/plain and text/html is sent
if the html_email_template parameter is passed to the view
"""
response = self.client.post('/password_reset/html_email_template/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertNotIn('<html>', message.get_payload(0).get_payload())
self.assertIn('<html>', message.get_payload(1).get_payload())
def test_email_found_custom_from(self):
"Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host(self):
"Poisoned HTTP_HOST headers can't be used for reset emails"
# This attack is based on the way browsers handle URLs. The colon
# should be used to separate the port, but if the URL contains an @,
# the colon is interpreted as part of a username for login purposes,
# making 'evil.com' the request domain. Since HTTP_HOST is used to
# produce a meaningful reset URL, we need to be certain that the
# HTTP_HOST header isn't poisoned. This is done as a check when get_host()
# is invoked, but we check here as a practical consequence.
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host_admin_site(self):
"Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/admin_password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
def _test_confirm_start(self):
# Start by creating the email
self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertIsNotNone(urlmatch, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
def test_confirm_invalid(self):
url, path = self._test_confirm_start()
# Let's munge the token in the path, but keep the same length,
# in case the URLconf will reject a different length.
path = path[:-5] + ("0" * 4) + path[-1]
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_user(self):
# Ensure that we get a 200 response for a non-existent user, not a 404
response = self.client.get('/reset/123456/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_overflow_user(self):
# Ensure that we get a 200 response for a base36 user id that overflows int
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_post(self):
# Same as test_confirm_invalid, but trying
# to do a POST instead.
url, path = self._test_confirm_start()
path = path[:-5] + ("0" * 4) + path[-1]
self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': ' anewpassword',
})
# Check the password has not been changed
u = User.objects.get(email='staffmember@example.com')
self.assertTrue(not u.check_password("anewpassword"))
def test_confirm_complete(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
# Check the password has been changed
u = User.objects.get(email='staffmember@example.com')
self.assertTrue(u.check_password("anewpassword"))
# Check we can't use the link again
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_different_passwords(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'x'})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_reset_redirect_default(self):
response = self.client.post('/password_reset/',
{'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/done/')
def test_reset_custom_redirect(self):
response = self.client.post('/password_reset/custom_redirect/',
{'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_reset_custom_redirect_named(self):
response = self.client.post('/password_reset/custom_redirect/named/',
{'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
def test_confirm_redirect_default(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/reset/done/')
def test_confirm_redirect_custom(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/')
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_confirm_redirect_custom_named(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/named/')
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
def test_confirm_display_user_from_form(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# #16919 -- The ``password_reset_confirm`` view should pass the user
# object to the ``SetPasswordForm``, even on GET requests.
# For this test, we render ``{{ form.user }}`` in the template
# ``registration/password_reset_confirm.html`` so that we can test this.
username = User.objects.get(email='staffmember@example.com').username
self.assertContains(response, "Hello, %s." % username)
# However, the view should NOT pass any user object on a form if the
# password reset link was invalid.
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "Hello, .")
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
user_email = 'staffmember@example.com'
@classmethod
def setUpTestData(cls):
cls.u1 = CustomUser.custom_objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), email='staffmember@example.com', is_active=True,
is_admin=False, date_of_birth=datetime.date(1976, 11, 8)
)
def _test_confirm_start(self):
# Start by creating the email
response = self.client.post('/password_reset/', {'email': self.user_email})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertIsNotNone(urlmatch, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid_custom_user(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
# then submit a new password
response = self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': 'anewpassword',
})
self.assertRedirects(response, '/reset/done/')
@override_settings(AUTH_USER_MODEL='auth.UUIDUser')
class UUIDUserPasswordResetTest(CustomUserPasswordResetTest):
def _test_confirm_start(self):
# instead of fixture
UUIDUser.objects.create_user(
email=self.user_email,
username='foo',
password='foo',
)
return super(UUIDUserPasswordResetTest, self)._test_confirm_start()
class ChangePasswordTest(AuthViewsTestCase):
def fail_login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password,
})
self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})
def logout(self):
self.client.get('/logout/')
def test_password_change_fails_with_invalid_old_password(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'donuts',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])
def test_password_change_fails_with_mismatched_passwords(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'donuts',
})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_password_change_succeeds(self):
self.login()
self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.fail_login()
self.login(password='password1')
def test_password_change_done_succeeds(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_change/done/')
@override_settings(LOGIN_URL='/login/')
def test_password_change_done_fails(self):
response = self.client.get('/password_change/done/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/?next=/password_change/done/')
def test_password_change_redirect_default(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_change/done/')
def test_password_change_redirect_custom(self):
self.login()
response = self.client.post('/password_change/custom/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_password_change_redirect_custom_named(self):
self.login()
response = self.client.post('/password_change/custom/named/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
class SessionAuthenticationTests(AuthViewsTestCase):
def test_user_password_change_updates_session(self):
"""
#21649 - Ensure contrib.auth.views.password_change updates the user's
session auth hash after a password change so the session isn't logged out.
"""
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
# if the hash isn't updated, retrieving the redirection page will fail.
self.assertRedirects(response, '/password_change/done/')
class LoginTest(AuthViewsTestCase):
def test_current_site_in_context_after_login(self):
response = self.client.get(reverse('login'))
self.assertEqual(response.status_code, 200)
if apps.is_installed('django.contrib.sites'):
Site = apps.get_model('sites.Site')
site = Site.objects.get_current()
self.assertEqual(response.context['site'], site)
self.assertEqual(response.context['site_name'], site.name)
else:
self.assertIsInstance(response.context['site'], RequestSite)
self.assertIsInstance(response.context['form'], AuthenticationForm)
def test_security_check(self, password='password'):
login_url = reverse('login')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'http:///example.com',
'https://example.com',
'ftp://exampel.com',
'///example.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
response = self.client.post(nasty_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertNotIn(bad_url, response.url,
"%s should be blocked" % bad_url)
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
response = self.client.post(safe_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertIn(good_url, response.url, "%s should be allowed" % good_url)
def test_login_form_contains_request(self):
# 15198
self.client.post('/custom_requestauth_login/', {
'username': 'testclient',
'password': 'password',
}, follow=True)
# the custom authentication form used by this login asserts
# that a request is passed to the form successfully.
def test_login_csrf_rotate(self, password='password'):
"""
Makes sure that a login rotates the currently-used CSRF token.
"""
# Do a GET to establish a CSRF token
# TestClient isn't used here as we're testing middleware, essentially.
req = HttpRequest()
CsrfViewMiddleware().process_view(req, login_view, (), {})
# get_token() triggers CSRF token inclusion in the response
get_token(req)
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token1 = csrf_cookie.coded_value
# Prepare the POST request
req = HttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
req.method = "POST"
req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}
# Use POST request to log in
SessionMiddleware().process_request(req)
CsrfViewMiddleware().process_view(req, login_view, (), {})
req.META["SERVER_NAME"] = "testserver" # Required to have redirect work in login view
req.META["SERVER_PORT"] = 80
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token2 = csrf_cookie.coded_value
# Check the CSRF token switched
self.assertNotEqual(token1, token2)
def test_session_key_flushed_on_login(self):
"""
To avoid reusing another user's session, ensure a new, empty session is
created if the existing session corresponds to a different authenticated
user.
"""
self.login()
original_session_key = self.client.session.session_key
self.login(username='staff')
self.assertNotEqual(original_session_key, self.client.session.session_key)
def test_session_key_flushed_on_login_after_password_change(self):
"""
As above, but same user logging in after a password change.
"""
self.login()
original_session_key = self.client.session.session_key
# If no password change, session key should not be flushed.
self.login()
self.assertEqual(original_session_key, self.client.session.session_key)
user = User.objects.get(username='testclient')
user.set_password('foobar')
user.save()
self.login(password='foobar')
self.assertNotEqual(original_session_key, self.client.session.session_key)
def test_login_session_without_hash_session_key(self):
"""
Session without django.contrib.auth.HASH_SESSION_KEY should login
without an exception.
"""
user = User.objects.get(username='testclient')
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session[SESSION_KEY] = user.id
session.save()
original_session_key = session.session_key
self.client.cookies[settings.SESSION_COOKIE_NAME] = original_session_key
self.login()
self.assertNotEqual(original_session_key, self.client.session.session_key)
class LoginURLSettings(AuthViewsTestCase):
"""Tests for settings.LOGIN_URL."""
def assertLoginURLEquals(self, url, parse_qs=False):
response = self.client.get('/login_required/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, url, parse_qs=parse_qs)
@override_settings(LOGIN_URL='/login/')
def test_standard_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='login')
def test_named_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='http://remote.example.com/login')
def test_remote_login_url(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'http://remote.example.com/login?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='https:///login/')
def test_https_login_url(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'https:///login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='/login/?pretty=1')
def test_login_url_with_querystring(self):
self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)
@override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
def test_remote_login_url_with_next_querystring(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'http://remote.example.com/login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_lazy_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
class LoginRedirectUrlTest(AuthViewsTestCase):
"""Tests for settings.LOGIN_REDIRECT_URL."""
def assertLoginRedirectURLEqual(self, url):
response = self.login()
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, url)
def test_default(self):
self.assertLoginRedirectURLEqual('/accounts/profile/')
@override_settings(LOGIN_REDIRECT_URL='/custom/')
def test_custom(self):
self.assertLoginRedirectURLEqual('/custom/')
@override_settings(LOGIN_REDIRECT_URL='password_reset')
def test_named(self):
self.assertLoginRedirectURLEqual('/password_reset/')
@override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
def test_remote(self):
self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
class RedirectToLoginTests(AuthViewsTestCase):
"""Tests for the redirect_to_login view"""
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_redirect_to_login_with_lazy(self):
login_redirect_response = redirect_to_login(next='/else/where/')
expected = '/login/?next=/else/where/'
self.assertEqual(expected, login_redirect_response.url)
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_redirect_to_login_with_lazy_and_unicode(self):
login_redirect_response = redirect_to_login(next='/else/where/झ/')
expected = '/login/?next=/else/where/%E0%A4%9D/'
self.assertEqual(expected, login_redirect_response.url)
class LogoutTest(AuthViewsTestCase):
def confirm_logged_out(self):
self.assertNotIn(SESSION_KEY, self.client.session)
def test_logout_default(self):
"Logout without next_page option renders the default template"
self.login()
response = self.client.get('/logout/')
self.assertContains(response, 'Logged out')
self.confirm_logged_out()
def test_14377(self):
# Bug 14377
self.login()
response = self.client.get('/logout/')
self.assertIn('site', response.context)
def test_logout_doesnt_cache(self):
"""
The logout() view should send "no-cache" headers for reasons described
in #25490.
"""
response = self.client.get('/logout/')
self.assertIn('no-store', response['Cache-Control'])
def test_logout_with_overridden_redirect_url(self):
# Bug 11223
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
response = self.client.get('/logout/next_page/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/')
self.confirm_logged_out()
def test_logout_with_next_page_specified(self):
"Logout with next_page option given redirects to specified resource"
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
self.confirm_logged_out()
def test_logout_with_redirect_argument(self):
"Logout with query string redirects to specified resource"
self.login()
response = self.client.get('/logout/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/')
self.confirm_logged_out()
def test_logout_with_custom_redirect_argument(self):
"Logout with custom query string redirects to specified resource"
self.login()
response = self.client.get('/logout/custom_query/?follow=/somewhere/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
self.confirm_logged_out()
def test_logout_with_named_redirect(self):
"Logout resolves names or URLs passed as next_page."
self.login()
response = self.client.get('/logout/next_page/named/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
self.confirm_logged_out()
def test_security_check(self, password='password'):
logout_url = reverse('logout')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'http:///example.com',
'https://example.com',
'ftp://exampel.com',
'///example.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
self.login()
response = self.client.get(nasty_url)
self.assertEqual(response.status_code, 302)
self.assertNotIn(bad_url, response.url,
"%s should be blocked" % bad_url)
self.confirm_logged_out()
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
self.login()
response = self.client.get(safe_url)
self.assertEqual(response.status_code, 302)
self.assertIn(good_url, response.url, "%s should be allowed" % good_url)
self.confirm_logged_out()
def test_logout_preserve_language(self):
"""Check that language stored in session is preserved after logout"""
# Create a new session with language
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session[LANGUAGE_SESSION_KEY] = 'pl'
session.save()
self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
self.client.get('/logout/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'pl')
# Redirect in test_user_change_password will fail if session auth hash
# isn't updated after password change (#21649)
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='auth_tests.urls_admin',
)
class ChangelistTests(AuthViewsTestCase):
def setUp(self):
# Make me a superuser before logging in.
User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
self.login()
self.admin = User.objects.get(pk=self.u1.pk)
def get_user_data(self, user):
return {
'username': user.username,
'password': user.password,
'email': user.email,
'is_active': user.is_active,
'is_staff': user.is_staff,
'is_superuser': user.is_superuser,
'last_login_0': user.last_login.strftime('%Y-%m-%d'),
'last_login_1': user.last_login.strftime('%H:%M:%S'),
'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'first_name': user.first_name,
'last_name': user.last_name,
}
# #20078 - users shouldn't be allowed to guess password hashes via
# repeated password__startswith queries.
def test_changelist_disallows_password_lookups(self):
# A lookup that tries to filter on password isn't OK
with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls:
response = self.client.get(reverse('auth_test_admin:auth_user_changelist') + '?password__startswith=sha1$')
self.assertEqual(response.status_code, 400)
self.assertEqual(len(logger_calls), 1)
def test_user_change_email(self):
data = self.get_user_data(self.admin)
data['email'] = 'new_' + data['email']
response = self.client.post(
reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
data
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed email.')
def test_user_not_change(self):
response = self.client.post(
reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
self.get_user_data(self.admin)
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'No fields changed.')
def test_user_change_password(self):
user_change_url = reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,))
password_change_url = reverse('auth_test_admin:auth_user_password_change', args=(self.admin.pk,))
response = self.client.get(user_change_url)
# Test the link inside password field help_text.
rel_link = re.search(
r'you can change the password using <a href="([^"]*)">this form</a>',
force_text(response.content)
).groups()[0]
self.assertEqual(
os.path.normpath(user_change_url + rel_link),
os.path.normpath(password_change_url)
)
response = self.client.post(
password_change_url,
{
'password1': 'password1',
'password2': 'password1',
}
)
self.assertRedirects(response, user_change_url)
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed password.')
self.logout()
self.login(password='password1')
def test_user_change_different_user_password(self):
u = User.objects.get(email='staffmember@example.com')
response = self.client.post(
reverse('auth_test_admin:auth_user_password_change', args=(u.pk,)),
{
'password1': 'password1',
'password2': 'password1',
}
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_change', args=(u.pk,)))
row = LogEntry.objects.latest('id')
self.assertEqual(row.user_id, self.admin.pk)
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.change_message, 'Changed password.')
def test_password_change_bad_url(self):
response = self.client.get(reverse('auth_test_admin:auth_user_password_change', args=('foobar',)))
self.assertEqual(response.status_code, 404)
@override_settings(
AUTH_USER_MODEL='auth.UUIDUser',
ROOT_URLCONF='auth_tests.urls_custom_user_admin',
)
class UUIDUserTests(TestCase):
def test_admin_password_change(self):
u = UUIDUser.objects.create_superuser(username='uuid', email='foo@bar.com', password='test')
self.assertTrue(self.client.login(username='uuid', password='test'))
user_change_url = reverse('custom_user_admin:auth_uuiduser_change', args=(u.pk,))
response = self.client.get(user_change_url)
self.assertEqual(response.status_code, 200)
password_change_url = reverse('custom_user_admin:auth_user_password_change', args=(u.pk,))
response = self.client.get(password_change_url)
self.assertEqual(response.status_code, 200)
# A LogEntry is created with pk=1 which breaks a FK constraint on MySQL
with connection.constraint_checks_disabled():
response = self.client.post(password_change_url, {
'password1': 'password1',
'password2': 'password1',
})
self.assertRedirects(response, user_change_url)
row = LogEntry.objects.latest('id')
            self.assertEqual(row.user_id, 1)  # hardcoded in CustomUserAdmin.log_change()
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.change_message, 'Changed password.')
| 43.333659
| 119
| 0.642649
|
d631f51bc460e99575bc7a98d8a4603fc927194d
| 661
|
py
|
Python
|
src/Lagrange/Lagrange.py
|
Roseck16/Interpolation
|
20513e02241824e37c9eab6642fc2f3139dd8e00
|
[
"MIT"
] | 1
|
2021-07-14T03:33:57.000Z
|
2021-07-14T03:33:57.000Z
|
src/Lagrange/Lagrange.py
|
Roseck16/Interpolation
|
20513e02241824e37c9eab6642fc2f3139dd8e00
|
[
"MIT"
] | null | null | null |
src/Lagrange/Lagrange.py
|
Roseck16/Interpolation
|
20513e02241824e37c9eab6642fc2f3139dd8e00
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
def lagrange(x, y, p):
    """Evaluate the Lagrange interpolating polynomial through (x, y) at point p."""
    n = len(x)
    total = 0
    for k in range(n):
        prod = product(x, p, k)
        total += y[k] * prod
    return total
def product(vec, p, i):
    """Return the i-th Lagrange basis polynomial evaluated at p for the nodes in vec."""
    n = len(vec)
    prod = 1
    for index in range(n):
        if index != i:
            num = p - vec[index]
            den = vec[i] - vec[index]
            prod *= num / den
    return prod
def graf(x, y, p, pol, WID, HEI):
f = plt.figure()
f.set_figwidth(WID)
f.set_figheight(HEI)
plt.plot(x,y,"o", label="$(x_i, y_i)$")
plt.plot(p, pol, label="$P(x)$")
plt.legend(loc="best")
plt.grid()
plt.show()
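# Hedged usage sketch (not part of the original module): interpolating y = x**2 through
# three nodes reproduces the quadratic exactly, so the polynomial evaluates to 2.25 at 1.5.
if __name__ == "__main__":
    xs = [0.0, 1.0, 2.0]
    ys = [0.0, 1.0, 4.0]
    print(lagrange(xs, ys, 1.5))  # expected: 2.25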
| 22.033333
| 43
| 0.511346
|
3b2c5732c57b83242a12293f9d9eadfc9811e42e
| 1,289
|
py
|
Python
|
wikiup/utils.py
|
boweeb/wikiup
|
2694a8a9bc3914507803abc0540850e25ec64fd2
|
[
"ISC"
] | null | null | null |
wikiup/utils.py
|
boweeb/wikiup
|
2694a8a9bc3914507803abc0540850e25ec64fd2
|
[
"ISC"
] | null | null | null |
wikiup/utils.py
|
boweeb/wikiup
|
2694a8a9bc3914507803abc0540850e25ec64fd2
|
[
"ISC"
] | null | null | null |
#!/usr/bin/env python3.6
# vim: set fileencoding=utf-8 tabstop=8 expandtab shiftwidth=4 softtabstop=4 smarttab autoindent:
# -*- coding: utf-8 -*-
"""wikiup.utils"""
import os
import getpass
from bs4.dammit import EntitySubstitution
def get_shell_username(username):
if not username:
return_ = os.environ['USER']
else:
return_ = username
return str(return_)
def require_password(password: str=None):
if not password:
return_ = getpass.getpass()
else:
return_ = password
return str(return_)
def get_option(opt: str, args: dict) -> str:
opt_env = 'WIKIUP_{}'.format(opt.upper())
if opt_env in os.environ.keys():
return os.environ[opt_env]
else:
return args['--{}'.format(opt)]
def compose_url(wid):
protocol = 'https'
target_host = 'wiki.signetaccel.com'
target_path = '/rest/api/content'
url = f'{protocol}://{target_host}{target_path}/{wid}'
return url
def non_newlines(tag):
return str(tag) != '\n'
def custom_formatter(string):
"""add " and ' to entity substitution"""
# return EntitySubstitution.substitute_html(string).replace('"', '"').replace("'", ''')
return EntitySubstitution.substitute_html(string).replace('"', '"')
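# Hedged usage sketch (not part of the original module; 'WIKIUP_SPACE' and '--space' are
# illustrative names): get_option prefers an environment variable over the args dict.
if __name__ == "__main__":
    os.environ.setdefault("WIKIUP_SPACE", "DOCS")
    print(get_option("space", {"--space": "FALLBACK"}))  # -> DOCS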
| 23.436364
| 101
| 0.656323
|
a3224b5252f9be2811512b9383ee6d5b4422d8b8
| 2,942
|
py
|
Python
|
checker/admin.py
|
uktrade/url-protection-checker
|
6d5a3e1d450b6dfde8e5bb1d1af93846f440a3d5
|
[
"MIT"
] | null | null | null |
checker/admin.py
|
uktrade/url-protection-checker
|
6d5a3e1d450b6dfde8e5bb1d1af93846f440a3d5
|
[
"MIT"
] | 1
|
2022-02-15T10:06:14.000Z
|
2022-02-15T10:06:14.000Z
|
checker/admin.py
|
uktrade/url-protection-checker
|
6d5a3e1d450b6dfde8e5bb1d1af93846f440a3d5
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Spaces, ApplicationsItem, NonPaasSites
from django.forms import ValidationError, ModelForm
class CheckForm(ModelForm):
class Meta:
model = ApplicationsItem
exclude = []
def clean(self):
data = super().clean()
if not data['reporting_enabled'] and not data['reporting_disabled_reason']:
raise ValidationError('You need to supply a reason why report is disabled')
return data
class modeladmin(admin.ModelAdmin):
form = CheckForm
def space_toggle_enabled(modeladmin, request, queryset):
for space_name in queryset:
if space_name.check_enabled:
space_name.check_enabled = False
else:
space_name.check_enabled = True
space_name.save()
space_toggle_enabled.short_description = 'Toggle check enabled'
@admin.register(Spaces)
class service_admin(admin.ModelAdmin):
list_display = ('id', 'space_name', 'space_guid', 'check_enabled')
actions = [space_toggle_enabled, ]
def toggle_reporting(modeladmin, request, queryset):
for app_route in queryset:
if app_route.reporting_enabled:
app_route.reporting_enabled = False
else:
app_route.reporting_enabled = True
app_route.save()
toggle_reporting.short_description = 'Toggle reporting enabled'
@admin.register(ApplicationsItem)
class applicationsitem_admin(admin.ModelAdmin):
form = CheckForm
list_display = ('id',
'org_name',
'space_name',
'app_name',
'app_route',
'is_behind_vpn',
'is_behind_sso',
'is_behind_app_auth',
'reporting_enabled',
'is_protected',
'reporting_disabled_reason')
actions = [toggle_reporting, ]
list_filter = ('reporting_enabled', 'is_protected')
search_fields = ['applications__app_name',
'applications__spaces__orgs__org_name',
'applications__spaces__space_name']
def app_name(self, obj):
return (obj.applications.app_name)
def space_name(self, obj):
return (obj.applications.spaces.space_name)
def org_name(self, obj):
return (obj.applications.spaces.orgs.org_name)
def nonpaassites_toggle_reporting(modeladmin, request, queryset):
for site_name in queryset:
if site_name.reporting_enabled:
site_name.reporting_enabled = False
else:
site_name.reporting_enabled = True
site_name.save()
nonpaassites_toggle_reporting.short_description = 'Toggle reporting enabled'
@admin.register(NonPaasSites)
class nonpaassites_admin(admin.ModelAdmin):
list_display = ('id', 'site_name', 'site_url', 'is_protected', 'reporting_enabled')
actions = [nonpaassites_toggle_reporting, ]
| 28.288462
| 87
| 0.653637
|
251ffcead117cc333fa504a74eae6024f6120cb1
| 9,342
|
py
|
Python
|
nova/scheduler/filters/trusted_filter.py
|
windskyer/nova
|
df485344dfd0f9e927e51273a3aaa71d948f2980
|
[
"Apache-2.0"
] | null | null | null |
nova/scheduler/filters/trusted_filter.py
|
windskyer/nova
|
df485344dfd0f9e927e51273a3aaa71d948f2980
|
[
"Apache-2.0"
] | null | null | null |
nova/scheduler/filters/trusted_filter.py
|
windskyer/nova
|
df485344dfd0f9e927e51273a3aaa71d948f2980
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2012 Intel, Inc.
# Copyright (c) 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Filter to add support for Trusted Computing Pools (EXPERIMENTAL).
Filter that only schedules tasks on a host if the integrity (trust)
of that host matches the trust requested in the ``extra_specs`` for the
flavor. The ``extra_specs`` will contain a key/value pair where the
key is ``trust``. The value of this pair (``trusted``/``untrusted``) must
match the integrity of that host (obtained from the Attestation
service) before the task can be scheduled on that host.
Note that the parameters to control access to the Attestation Service
are in the ``nova.conf`` file in a separate ``trust`` section. For example,
the config file will look something like:
[DEFAULT]
verbose=True
...
[trust]
server=attester.mynetwork.com
Details on the specific parameters can be found in the file
``trust_attest.py``.
Details on setting up and using an Attestation Service can be found at
the Open Attestation project at:
https://github.com/OpenAttestation/OpenAttestation
"""
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import requests
import nova.conf
from nova import context
from nova.i18n import _LW
from nova import objects
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class AttestationService(object):
# Provide access wrapper to attestation server to get integrity report.
def __init__(self):
self.api_url = CONF.trusted_computing.attestation_api_url
self.host = CONF.trusted_computing.attestation_server
self.port = CONF.trusted_computing.attestation_port
self.auth_blob = CONF.trusted_computing.attestation_auth_blob
self.key_file = None
self.cert_file = None
self.ca_file = CONF.trusted_computing.attestation_server_ca_file
self.request_count = 100
# If the CA file is not provided, let's check the cert if verification
# asked
self.verify = (not CONF.trusted_computing.attestation_insecure_ssl
and self.ca_file or True)
self.cert = (self.cert_file, self.key_file)
def _do_request(self, method, action_url, body, headers):
# Connects to the server and issues a request.
# :returns: result data
# :raises: IOError if the request fails
action_url = "https://%s:%s%s/%s" % (self.host, self.port,
self.api_url, action_url)
try:
res = requests.request(method, action_url, data=body,
headers=headers, cert=self.cert,
verify=self.verify)
status_code = res.status_code
if status_code in (requests.codes.OK,
requests.codes.CREATED,
requests.codes.ACCEPTED,
requests.codes.NO_CONTENT):
try:
return requests.codes.OK, jsonutils.loads(res.text)
except (TypeError, ValueError):
return requests.codes.OK, res.text
return status_code, None
except requests.exceptions.RequestException:
return IOError, None
def _request(self, cmd, subcmd, hosts):
body = {}
body['count'] = len(hosts)
body['hosts'] = hosts
cooked = jsonutils.dumps(body)
headers = {}
headers['content-type'] = 'application/json'
headers['Accept'] = 'application/json'
if self.auth_blob:
headers['x-auth-blob'] = self.auth_blob
status, res = self._do_request(cmd, subcmd, cooked, headers)
return status, res
def do_attestation(self, hosts):
"""Attests compute nodes through OAT service.
:param hosts: hosts list to be attested
:returns: dictionary for trust level and validate time
"""
result = None
status, data = self._request("POST", "PollHosts", hosts)
if data is not None:
result = data.get('hosts')
return result
class ComputeAttestationCache(object):
"""Cache for compute node attestation
Cache compute node's trust level for sometime,
if the cache is out of date, poll OAT service to flush the
cache.
OAT service may have cache also. OAT service's cache valid time
should be set shorter than trusted filter's cache valid time.
"""
def __init__(self):
self.attestservice = AttestationService()
self.compute_nodes = {}
admin = context.get_admin_context()
# Fetch compute node list to initialize the compute_nodes,
        # so that we don't need to poll the OAT service one by one for each
# host in the first round that scheduler invokes us.
computes = objects.ComputeNodeList.get_all(admin)
for compute in computes:
host = compute.hypervisor_hostname
self._init_cache_entry(host)
def _cache_valid(self, host):
cachevalid = False
if host in self.compute_nodes:
node_stats = self.compute_nodes.get(host)
if not timeutils.is_older_than(
node_stats['vtime'],
CONF.trusted_computing.attestation_auth_timeout):
cachevalid = True
return cachevalid
def _init_cache_entry(self, host):
self.compute_nodes[host] = {
'trust_lvl': 'unknown',
'vtime': timeutils.normalize_time(
timeutils.parse_isotime("1970-01-01T00:00:00Z"))}
def _invalidate_caches(self):
for host in self.compute_nodes:
self._init_cache_entry(host)
def _update_cache_entry(self, state):
entry = {}
host = state['host_name']
entry['trust_lvl'] = state['trust_lvl']
try:
# Normalize as naive object to interoperate with utcnow().
entry['vtime'] = timeutils.normalize_time(
timeutils.parse_isotime(state['vtime']))
except ValueError:
try:
# Mt. Wilson does not necessarily return an ISO8601 formatted
# `vtime`, so we should try to parse it as a string formatted
# datetime.
vtime = timeutils.parse_strtime(state['vtime'], fmt="%c")
entry['vtime'] = timeutils.normalize_time(vtime)
except ValueError:
# Mark the system as un-trusted if get invalid vtime.
entry['trust_lvl'] = 'unknown'
entry['vtime'] = timeutils.utcnow()
self.compute_nodes[host] = entry
def _update_cache(self):
self._invalidate_caches()
states = self.attestservice.do_attestation(
list(self.compute_nodes.keys()))
if states is None:
return
for state in states:
self._update_cache_entry(state)
def get_host_attestation(self, host):
"""Check host's trust level."""
if host not in self.compute_nodes:
self._init_cache_entry(host)
if not self._cache_valid(host):
self._update_cache()
level = self.compute_nodes.get(host).get('trust_lvl')
return level
class ComputeAttestation(object):
def __init__(self):
self.caches = ComputeAttestationCache()
def is_trusted(self, host, trust):
level = self.caches.get_host_attestation(host)
return trust == level
class TrustedFilter(filters.BaseHostFilter):
"""Trusted filter to support Trusted Compute Pools."""
def __init__(self):
self.compute_attestation = ComputeAttestation()
LOG.warning(_LW('The TrustedFilter is considered experimental '
'by the OpenStack project because it receives much '
'less testing than the rest of Nova. This may change '
'in the future, but current deployers should be aware '
'that the use of it in production right now may be '
'risky.'))
    # The hosts the instances are running on don't change within a request
run_filter_once_per_request = True
@filters.compat_legacy_props
def host_passes(self, host_state, filter_properties):
instance_type = filter_properties.get('instance_type', {})
extra = instance_type.get('extra_specs', {})
trust = extra.get('trust:trusted_host')
host = host_state.nodename
if trust:
return self.compute_attestation.is_trusted(host, trust)
return True
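# Hedged illustration (not part of nova): the filter keys off the flavor extra_specs entry
# 'trust:trusted_host'. A flavor carrying, for example,
#     trust:trusted_host=trusted
# only passes hosts whose attestation level comes back as 'trusted'; flavors without the
# key pass every host unchanged.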
| 36.924901
| 79
| 0.634447
|
7d10ef1fabbe1543a4fd557a1b1b6c90a876924d
| 2,013
|
py
|
Python
|
var/spack/repos/builtin/packages/arborx/package.py
|
asmaahassan90/spack
|
b6779d2e31170eb77761f59bed640afbc469e4ec
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-05-24T15:23:12.000Z
|
2020-05-24T15:23:12.000Z
|
var/spack/repos/builtin/packages/arborx/package.py
|
asmaahassan90/spack
|
b6779d2e31170eb77761f59bed640afbc469e4ec
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 6
|
2022-02-26T11:44:34.000Z
|
2022-03-12T12:14:50.000Z
|
var/spack/repos/builtin/packages/arborx/package.py
|
asmaahassan90/spack
|
b6779d2e31170eb77761f59bed640afbc469e4ec
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2021-01-06T18:58:26.000Z
|
2021-01-06T18:58:26.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Arborx(CMakePackage):
"""ArborX is a performance-portable library for geometric search"""
homepage = "http://github.com/arborx/arborx"
url = "https://github.com/arborx/arborx/archive/v0.8-beta2.tar.gz"
git = "https://github.com/arborx/arborx.git"
version('master', branch='master')
version('0.8-beta2', sha256='e68733bc77fbb84313f3ff059f746fa79ab2ffe24a0a391126eefa47ec4fd2df')
variant('cuda', default=False, description='enable Cuda backend')
variant('openmp', default=False, description='enable OpenMP backend')
variant('serial', default=True, description='enable Serial backend (default)')
variant('mpi', default=True, description='enable MPI')
depends_on('cmake@3.12:', type='build')
depends_on('cuda', when='+cuda')
depends_on('mpi', when='+mpi')
# ArborX relies on Kokkos to provide devices, thus having one-to-one match
# The only way to disable those devices is to make sure Kokkos does not
# provide them
depends_on('kokkos-legacy@2.7.00:+cuda+enable_lambda cxxstd=c++14', when='+cuda')
depends_on('kokkos-legacy@2.7.00:+openmp cxxstd=c++14', when='+openmp')
depends_on('kokkos-legacy@2.7.00:+serial cxxstd=c++14', when='+serial')
def cmake_args(self):
spec = self.spec
options = [
'-DCMAKE_PREFIX_PATH=%s' % spec['kokkos-legacy'].prefix,
'-DARBORX_ENABLE_TESTS=OFF',
'-DARBORX_ENABLE_EXAMPLES=OFF',
'-DARBORX_ENABLE_BENCHMARKS=OFF',
'-DARBORX_ENABLE_MPI=%s' % ('ON' if '+mpi' in spec else 'OFF')
]
if '+cuda' in spec:
            # The Kokkos dependency is declared as 'kokkos-legacy', so take nvcc_wrapper from it.
            nvcc_wrapper_path = spec['kokkos-legacy'].prefix.bin.nvcc_wrapper
options.append('-DCMAKE_CXX_COMPILER=%s' % nvcc_wrapper_path)
return options
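# Hedged usage sketch (not a documented recipe for this package): enabling the OpenMP
# backend might look like
#     spack install arborx+openmp ^kokkos-legacy+openmp
# while the default build keeps '+serial+mpi' with the Serial Kokkos backend.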
| 39.470588
| 99
| 0.670144
|
6aa497822c82716b9b8ce1f7ba832e46d424073c
| 8,176
|
py
|
Python
|
activitysim/abm/models/accessibility.py
|
SEMCOG/SEMCOG_ActSim
|
cc18cce84b2e4b5f380f58c7919953d2cd03ee73
|
[
"BSD-3-Clause"
] | null | null | null |
activitysim/abm/models/accessibility.py
|
SEMCOG/SEMCOG_ActSim
|
cc18cce84b2e4b5f380f58c7919953d2cd03ee73
|
[
"BSD-3-Clause"
] | 1
|
2021-06-30T23:39:37.000Z
|
2021-06-30T23:39:37.000Z
|
activitysim/abm/models/accessibility.py
|
SEMCOG/SEMCOG_ActSim
|
cc18cce84b2e4b5f380f58c7919953d2cd03ee73
|
[
"BSD-3-Clause"
] | null | null | null |
# ActivitySim
# See full license in LICENSE.txt.
import logging
import pandas as pd
import numpy as np
from activitysim.core import assign
from activitysim.core import tracing
from activitysim.core import config
from activitysim.core import inject
from activitysim.core import pipeline
from activitysim.core import mem
from activitysim.core import los
from activitysim.core.pathbuilder import TransitVirtualPathBuilder
logger = logging.getLogger(__name__)
# class AccessibilitySkims(object):
# """
# Wrapper for skim arrays to facilitate use of skims by accessibility model
#
# Parameters
# ----------
# skims : 2D array
# omx: open omx file object
# this is only used to load skims on demand that were not preloaded
# length: int
# number of zones in skim to return in skim matrix
# in case the skims contain additional external zones that should be trimmed out so skim
# array is correct shape to match (flattened) O-D tiled columns in the od dataframe
# transpose: bool
# whether to transpose the matrix before flattening. (i.e. act as a D-O instead of O-D skim)
# """
#
# def __init__(self, skim_dict, orig_zones, dest_zones, transpose=False):
#
# logger.info(f"init AccessibilitySkims with {len(dest_zones)} dest zones {len(orig_zones)} orig zones")
#
# assert len(orig_zones) <= len(dest_zones)
# assert np.isin(orig_zones, dest_zones).all()
# assert len(np.unique(orig_zones)) == len(orig_zones)
# assert len(np.unique(dest_zones)) == len(dest_zones)
#
# self.skim_dict = skim_dict
# self.transpose = transpose
#
# num_skim_zones = skim_dict.get_skim_info('omx_shape')[0]
# if num_skim_zones == len(orig_zones) and skim_dict.offset_mapper.offset_series is None:
# # no slicing required because whatever the offset_int, the skim data aligns with zone list
# self.map_data = False
# else:
#
# logger.debug("AccessibilitySkims - applying offset_mapper")
#
# skim_index = list(range(num_skim_zones))
# orig_map = skim_dict.offset_mapper.map(orig_zones)
# dest_map = skim_dict.offset_mapper.map(dest_zones)
#
# # (we might be sliced multiprocessing)
# # assert np.isin(skim_index, orig_map).all()
#
# out_of_bounds = ~np.isin(skim_index, dest_map)
# # if out_of_bounds.any():
# # print(f"{(out_of_bounds).sum()} skim zones not in dest_map")
# # print(f"dest_zones {dest_zones}")
# # print(f"dest_map {dest_map}")
# # print(f"skim_index {skim_index}")
# assert not out_of_bounds.any(), \
# f"AccessibilitySkims {(out_of_bounds).sum()} skim zones not in dest_map: {np.ix_(out_of_bounds)[0]}"
#
# self.map_data = True
# self.orig_map = orig_map
# self.dest_map = dest_map
#
# def __getitem__(self, key):
# """
# accessor to return flattened skim array with specified key
# flattened array will have length length*length and will match tiled OD df used by assign
#
# this allows the skim array to be accessed from expressions as
# skim['DISTANCE'] or skim[('SOVTOLL_TIME', 'MD')]
# """
#
# data = self.skim_dict.get(key).data
#
# if self.transpose:
# data = data.transpose()
#
# if self.map_data:
# # slice skim to include only orig rows and dest columns
# # 2-d boolean slicing in numpy is a bit tricky
# # data = data[orig_map, dest_map] # <- WRONG!
# # data = data[orig_map, :][:, dest_map] # <- RIGHT
# # data = data[np.ix_(orig_map, dest_map)] # <- ALSO RIGHT
#
# data = data[self.orig_map, :][:, self.dest_map]
#
# return data.flatten()
@inject.step()
def compute_accessibility(accessibility, network_los, land_use, trace_od):
"""
Compute accessibility for each zone in land use file using expressions from accessibility_spec
The actual results depend on the expressions in accessibility_spec, but this is initially
intended to permit implementation of the mtc accessibility calculation as implemented by
Accessibility.job
Compute measures of accessibility used by the automobile ownership model.
The accessibility measure first multiplies an employment variable by a mode-specific decay
function. The product reflects the difficulty of accessing the activities the farther
    (in terms of round-trip travel time) the jobs are from the location in question. The products
    across destination zones are then summed for each origin zone, and the logarithm of the
    sum mutes large differences. The decay function on the walk accessibility measure is
steeper than automobile or transit. The minimum accessibility is zero.
"""
trace_label = 'compute_accessibility'
model_settings = config.read_model_settings('accessibility.yaml')
assignment_spec = assign.read_assignment_spec(config.config_file_path('accessibility.csv'))
accessibility_df = accessibility.to_frame()
logger.info("Running %s with %d dest zones" % (trace_label, len(accessibility_df)))
constants = config.get_model_constants(model_settings)
land_use_columns = model_settings.get('land_use_columns', [])
land_use_df = land_use.to_frame()
land_use_df = land_use_df[land_use_columns]
# don't assume they are the same: accessibility may be sliced if we are multiprocessing
orig_zones = accessibility_df.index.values
dest_zones = land_use_df.index.values
orig_zone_count = len(orig_zones)
dest_zone_count = len(dest_zones)
logger.info("Running %s with %d dest zones %d orig zones" %
(trace_label, dest_zone_count, orig_zone_count))
# create OD dataframe
od_df = pd.DataFrame(
data={
'orig': np.repeat(orig_zones, dest_zone_count),
'dest': np.tile(dest_zones, orig_zone_count)
}
)
if trace_od:
trace_orig, trace_dest = trace_od
trace_od_rows = (od_df.orig == trace_orig) & (od_df.dest == trace_dest)
else:
trace_od_rows = None
# merge land_use_columns into od_df
od_df = pd.merge(od_df, land_use_df, left_on='dest', right_index=True).sort_index()
locals_d = {
'log': np.log,
'exp': np.exp,
'network_los': network_los,
}
skim_dict = network_los.get_default_skim_dict()
locals_d['skim_od'] = skim_dict.wrap('orig', 'dest').set_df(od_df)
locals_d['skim_do'] = skim_dict.wrap('dest', 'orig').set_df(od_df)
if network_los.zone_system == los.THREE_ZONE:
locals_d['tvpb'] = TransitVirtualPathBuilder(network_los)
if constants is not None:
locals_d.update(constants)
results, trace_results, trace_assigned_locals \
= assign.assign_variables(assignment_spec, od_df, locals_d, trace_rows=trace_od_rows)
for column in results.columns:
data = np.asanyarray(results[column])
data.shape = (orig_zone_count, dest_zone_count) # (o,d)
accessibility_df[column] = np.log(np.sum(data, axis=1) + 1)
logger.info("{trace_label} added {len(results.columns} columns")
# - write table to pipeline
pipeline.replace_table("accessibility", accessibility_df)
if trace_od:
if not trace_od_rows.any():
logger.warning(f"trace_od not found origin = {trace_orig}, dest = {trace_dest}")
else:
# add OD columns to trace results
df = pd.concat([od_df[trace_od_rows], trace_results], axis=1)
# dump the trace results table (with _temp variables) to aid debugging
tracing.trace_df(df,
label='accessibility',
index_label='skim_offset',
slicer='NONE',
warn_if_empty=True)
if trace_assigned_locals:
tracing.write_csv(trace_assigned_locals, file_name="accessibility_locals")
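# Illustrative sketch, not part of the ActivitySim source: shows the aggregation
# step used above on toy numbers. Each assigned expression comes back as a flat
# origin*dest vector, is reshaped to (o, d), summed over destinations, and
# log-damped with log(sum + 1). All values below are made up.
if __name__ == "__main__":
    import numpy as np  # re-imported so the sketch is self-contained

    orig_zone_count, dest_zone_count = 2, 3
    flat_result = np.array([10.0, 20.0, 5.0, 1.0, 0.0, 4.0])  # one expression's result
    data = flat_result.reshape(orig_zone_count, dest_zone_count)
    print(np.log(np.sum(data, axis=1) + 1))  # -> [log(36.), log(6.)], one value per origin zone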
| 38.748815
| 118
| 0.658268
|
2f47750b7fe5e85c1d2741bbe7e61cb19bb2195e
| 1,602
|
py
|
Python
|
setup.py
|
elehcimd/metacash
|
3681df7d14d656ffb30d4b8720368845f99cce58
|
[
"MIT"
] | 1
|
2021-03-31T13:59:42.000Z
|
2021-03-31T13:59:42.000Z
|
setup.py
|
elehcimd/metacash
|
3681df7d14d656ffb30d4b8720368845f99cce58
|
[
"MIT"
] | 2
|
2021-03-31T14:28:11.000Z
|
2021-03-31T15:31:32.000Z
|
setup.py
|
elehcimd/metacash
|
3681df7d14d656ffb30d4b8720368845f99cce58
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from version import __version__
# Get the long description from the README file
with open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(
name='metacash',
version=__version__,
author='Michele Dallachiesa',
author_email='michele.dallachiesa@sigforge.com',
packages=find_packages(exclude=["tests"]),
scripts=[],
url='https://github.com/elehcimd/metacash',
license='MIT',
description='Keep a close eye on your financial transactions',
long_description=long_description,
python_requires=">=3.6",
install_requires=[
"fabric",
"jupyterlab",
"joblib",
"pandas",
"numpy",
"matplotlib",
"pycodestyle",
"pytest",
"autopep8",
"ipywidgets",
"colorama",
"qgrid"
],
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
],
)
| 29.127273
| 77
| 0.613608
|
6d4f52c300778a43df6e86dd8de635f134b7a8ee
| 1,597
|
py
|
Python
|
plenum/test/view_change/test_view_change_start_without_primary.py
|
spivachuk/plenum
|
05123166e8ffa89520541ea3b59b20390aaf92a4
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/view_change/test_view_change_start_without_primary.py
|
spivachuk/plenum
|
05123166e8ffa89520541ea3b59b20390aaf92a4
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/view_change/test_view_change_start_without_primary.py
|
spivachuk/plenum
|
05123166e8ffa89520541ea3b59b20390aaf92a4
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from plenum.test.helper import stopNodes
from plenum.test.test_node import checkProtocolInstanceSetup, getRequiredInstances, \
checkNodesConnected
from plenum.test import waits
VIEW_CHANGE_TIMEOUT = 10
def test_view_change_without_primary(txnPoolNodeSet, looper,
patched_view_change_timeout):
first, others = stop_nodes_and_remove_first(looper, txnPoolNodeSet)
start_and_connect_nodes(looper, others)
timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) + patched_view_change_timeout
#looper.runFor(40)
checkProtocolInstanceSetup(looper=looper, nodes=txnPoolNodeSet, retryWait=1,
customTimeout=timeout,
numInstances=getRequiredInstances(len(txnPoolNodeSet)))
def stop_nodes_and_remove_first(looper, nodes):
first_node = nodes[0]
stopNodes(nodes, looper)
looper.removeProdable(first_node)
looper.runFor(3) # let the nodes stop
return first_node, \
list(filter(lambda x: x.name != first_node.name, nodes))
def start_and_connect_nodes(looper, nodes):
for n in nodes:
n.start(looper.loop)
looper.run(checkNodesConnected(nodes))
@pytest.fixture(scope='function')
def patched_view_change_timeout(txnPoolNodeSet):
old_view_change_timeout = txnPoolNodeSet[0]._view_change_timeout
for node in txnPoolNodeSet:
node._view_change_timeout = VIEW_CHANGE_TIMEOUT
yield VIEW_CHANGE_TIMEOUT
for node in txnPoolNodeSet:
node._view_change_timeout = old_view_change_timeout
| 32.591837
| 98
| 0.733876
|
809941f37570d418a7b38de2b38c5d2800e58837
| 1,535
|
py
|
Python
|
azure-mgmt-batchai/azure/mgmt/batchai/models/clusters_list_by_resource_group_options_py3.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 1
|
2022-03-30T22:39:15.000Z
|
2022-03-30T22:39:15.000Z
|
azure-mgmt-batchai/azure/mgmt/batchai/models/clusters_list_by_resource_group_options_py3.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-batchai/azure/mgmt/batchai/models/clusters_list_by_resource_group_options_py3.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ClustersListByResourceGroupOptions(Model):
"""Additional parameters for list_by_resource_group operation.
    :param filter: An OData $filter clause. Used to filter results that are
     returned in the GET response.
:type filter: str
:param select: An OData $select clause. Used to select the properties to
     be returned in the GET response.
:type select: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 files can be returned. Default value: 1000 .
:type max_results: int
"""
_attribute_map = {
'filter': {'key': '', 'type': 'str'},
'select': {'key': '', 'type': 'str'},
'max_results': {'key': '', 'type': 'int'},
}
def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, **kwargs) -> None:
super(ClustersListByResourceGroupOptions, self).__init__(**kwargs)
self.filter = filter
self.select = select
self.max_results = max_results
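# Illustrative sketch, not part of the generated SDK: shows how this options
# object is typically constructed. The filter/select strings and max_results
# value are made-up examples, and passing the object to the matching clusters
# list operation is an assumption based on the class name.
if __name__ == "__main__":
    options = ClustersListByResourceGroupOptions(
        filter="name eq 'demo-cluster'",
        select="name,provisioningState",
        max_results=100,
    )
    print(options.filter, options.select, options.max_results)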
| 38.375
| 103
| 0.614984
|
92d39e65546fd95f0e25143ca60d41bceefe2836
| 1,174
|
py
|
Python
|
saleor/graphql/attribute/bulk_mutations.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 15,337
|
2015-01-12T02:11:52.000Z
|
2021-10-05T19:19:29.000Z
|
saleor/graphql/attribute/bulk_mutations.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 7,486
|
2015-02-11T10:52:13.000Z
|
2021-10-06T09:37:15.000Z
|
saleor/graphql/attribute/bulk_mutations.py
|
aminziadna/saleor
|
2e78fb5bcf8b83a6278af02551a104cfa555a1fb
|
[
"CC-BY-4.0"
] | 5,864
|
2015-01-16T14:52:54.000Z
|
2021-10-05T23:01:15.000Z
|
import graphene
from ...attribute import models
from ...core.permissions import PageTypePermissions
from ..core.mutations import ModelBulkDeleteMutation
from ..core.types.common import AttributeError
class AttributeBulkDelete(ModelBulkDeleteMutation):
class Arguments:
ids = graphene.List(
graphene.ID, required=True, description="List of attribute IDs to delete."
)
class Meta:
description = "Deletes attributes."
model = models.Attribute
permissions = (PageTypePermissions.MANAGE_PAGE_TYPES_AND_ATTRIBUTES,)
error_type_class = AttributeError
error_type_field = "attribute_errors"
class AttributeValueBulkDelete(ModelBulkDeleteMutation):
class Arguments:
ids = graphene.List(
graphene.ID,
required=True,
description="List of attribute value IDs to delete.",
)
class Meta:
description = "Deletes values of attributes."
model = models.AttributeValue
permissions = (PageTypePermissions.MANAGE_PAGE_TYPES_AND_ATTRIBUTES,)
error_type_class = AttributeError
error_type_field = "attribute_errors"
| 31.72973
| 86
| 0.701022
|
9bb3d44fe5cf97bc78c4cef236104297ded650d1
| 3,450
|
py
|
Python
|
src/collectors/flume/flume.py
|
devanshukoyalkar-rubrik/Diamond
|
c4c3f2e4723c2e4381b7bf5348cc3a25f321315d
|
[
"MIT"
] | null | null | null |
src/collectors/flume/flume.py
|
devanshukoyalkar-rubrik/Diamond
|
c4c3f2e4723c2e4381b7bf5348cc3a25f321315d
|
[
"MIT"
] | null | null | null |
src/collectors/flume/flume.py
|
devanshukoyalkar-rubrik/Diamond
|
c4c3f2e4723c2e4381b7bf5348cc3a25f321315d
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
Collect statistics from Flume
#### Dependencies
* urllib (Python 3 standard library)
* json or simplejson
"""
import urllib.request, urllib.error, urllib.parse
import diamond.collector
try:
import simplejson as json
except ImportError:
import json
class FlumeCollector(diamond.collector.Collector):
# items to collect
_metrics_collect = {
'CHANNEL': [
'ChannelFillPercentage',
'EventPutAttemptCount',
'EventPutSuccessCount',
'EventTakeAttemptCount',
'EventTakeSuccessCount'
],
'SINK': [
'BatchCompleteCount',
'BatchEmptyCount',
'BatchUnderflowCount',
'ConnectionClosedCount',
'ConnectionCreatedCount',
'ConnectionFailedCount',
'EventDrainAttemptCount',
'EventDrainSuccessCount'
],
'SOURCE': [
'AppendAcceptedCount',
'AppendBatchAcceptedCount',
'AppendBatchReceivedCount',
'AppendReceivedCount',
'EventAcceptedCount',
'EventReceivedCount',
'OpenConnectionCount'
]
}
def get_default_config_help(self):
config_help = super(FlumeCollector, self).get_default_config_help()
config_help.update({
'req_host': 'Hostname',
'req_port': 'Port',
'req_path': 'Path',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
default_config = super(FlumeCollector, self).get_default_config()
default_config['path'] = 'flume'
default_config['req_host'] = 'localhost'
default_config['req_port'] = 41414
default_config['req_path'] = '/metrics'
return default_config
def collect(self):
url = 'http://{0}:{1}{2}'.format(
self.config['req_host'],
self.config['req_port'],
self.config['req_path']
)
try:
resp = urllib.request.urlopen(url)
try:
j = json.loads(resp.read())
resp.close()
except Exception as e:
resp.close()
self.log.error('Cannot load json data: %s', e)
return None
except urllib.error.URLError as e:
self.log.error('Failed to open url: %s', e)
return None
except Exception as e:
self.log.error('Unknown error opening url: %s', e)
return None
for comp in j.items():
comp_name = comp[0]
comp_items = comp[1]
comp_type = comp_items['Type']
for item in self._metrics_collect[comp_type]:
if item.endswith('Count'):
metric_name = '{0}.{1}'.format(comp_name, item[:-5])
metric_value = int(comp_items[item])
self.publish_counter(metric_name, metric_value)
elif item.endswith('Percentage'):
metric_name = '{0}.{1}'.format(comp_name, item)
metric_value = float(comp_items[item])
self.publish_gauge(metric_name, metric_value)
else:
metric_name = item
metric_value = int(comp_items[item])
self.publish_gauge(metric_name, metric_value)
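# Illustrative sketch, not part of the collector: demonstrates the suffix-based
# dispatch in collect() on a made-up payload. '*Count' items are published as
# counters (with the 'Count' suffix stripped) and '*Percentage' items as gauges.
if __name__ == "__main__":
    sample = {
        'CHANNEL.ch1': {
            'Type': 'CHANNEL',
            'ChannelFillPercentage': '42.0',
            'EventPutSuccessCount': '1234',
        }
    }
    for comp_name, comp_items in sample.items():
        for item, value in comp_items.items():
            if item == 'Type':
                continue
            if item.endswith('Count'):
                print('counter', '{0}.{1}'.format(comp_name, item[:-5]), int(value))
            elif item.endswith('Percentage'):
                print('gauge', '{0}.{1}'.format(comp_name, item), float(value))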
| 30
| 75
| 0.543478
|
4ee92b3494a6ef5a001d49e45fb1d8a1365e69a0
| 3,044
|
py
|
Python
|
Algorithm.Python/BasicTemplateFrameworkAlgorithm.py
|
BlackBoxAM/Lean
|
5ea9f04b104d27f0fcfe3a383a3a60ca12206d99
|
[
"Apache-2.0"
] | 6,580
|
2015-01-12T16:48:44.000Z
|
2022-03-31T22:05:09.000Z
|
Algorithm.Python/BasicTemplateFrameworkAlgorithm.py
|
BlackBoxAM/Lean
|
5ea9f04b104d27f0fcfe3a383a3a60ca12206d99
|
[
"Apache-2.0"
] | 3,392
|
2015-01-12T17:44:07.000Z
|
2022-03-30T20:34:03.000Z
|
Algorithm.Python/BasicTemplateFrameworkAlgorithm.py
|
BlackBoxAM/Lean
|
5ea9f04b104d27f0fcfe3a383a3a60ca12206d99
|
[
"Apache-2.0"
] | 3,354
|
2015-01-12T16:58:31.000Z
|
2022-03-31T00:56:03.000Z
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### Basic template framework algorithm uses framework components to define the algorithm.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="trading and orders" />
class BasicTemplateFrameworkAlgorithm(QCAlgorithm):
'''Basic template framework algorithm uses framework components to define the algorithm.'''
def Initialize(self):
        ''' Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must be initialized.'''
# Set requested data resolution
self.UniverseSettings.Resolution = Resolution.Minute
self.SetStartDate(2013,10,7) #Set Start Date
self.SetEndDate(2013,10,11) #Set End Date
self.SetCash(100000) #Set Strategy Cash
# Find more symbols here: http://quantconnect.com/data
# Forex, CFD, Equities Resolutions: Tick, Second, Minute, Hour, Daily.
# Futures Resolution: Tick, Second, Minute
# Options Resolution: Minute Only.
symbols = [ Symbol.Create("SPY", SecurityType.Equity, Market.USA) ]
# set algorithm framework models
self.SetUniverseSelection(ManualUniverseSelectionModel(symbols))
self.SetAlpha(ConstantAlphaModel(InsightType.Price, InsightDirection.Up, timedelta(minutes = 20), 0.025, None))
        # We can define how often the EWPCM will rebalance if no new insight is submitted using:
# Resolution Enum:
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel(Resolution.Daily))
# timedelta
# self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel(timedelta(2)))
        # A lambda datetime -> datetime. In this case, we can use the pre-defined func at Expiry helper class
# self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel(Expiry.EndOfWeek))
self.SetExecution(ImmediateExecutionModel())
self.SetRiskManagement(MaximumDrawdownPercentPerSecurity(0.01))
self.Debug("numpy test >>> print numpy.pi: " + str(np.pi))
def OnOrderEvent(self, orderEvent):
if orderEvent.Status == OrderStatus.Filled:
self.Debug("Purchased Stock: {0}".format(orderEvent.Symbol))
| 49.901639
| 152
| 0.725033
|
d1f1b7b35eb764d2bea2a2fdecccb30a7b84a852
| 7,203
|
py
|
Python
|
cgt_calc/parsers/schwab.py
|
danielkza/capital-gains-calculator
|
70a2a5bd107311e26c5b82b5218cb569a8ee52a6
|
[
"MIT"
] | null | null | null |
cgt_calc/parsers/schwab.py
|
danielkza/capital-gains-calculator
|
70a2a5bd107311e26c5b82b5218cb569a8ee52a6
|
[
"MIT"
] | null | null | null |
cgt_calc/parsers/schwab.py
|
danielkza/capital-gains-calculator
|
70a2a5bd107311e26c5b82b5218cb569a8ee52a6
|
[
"MIT"
] | null | null | null |
"""Charles Schwab parser."""
from __future__ import annotations
from collections import defaultdict
import csv
from dataclasses import dataclass
import datetime
from decimal import Decimal
import itertools
from pathlib import Path
from cgt_calc.exceptions import (
ExchangeRateMissingError,
ParsingError,
SymbolMissingError,
UnexpectedColumnCountError,
UnexpectedRowCountError,
)
from cgt_calc.model import ActionType, BrokerTransaction
@dataclass
class AwardPrices:
"""Class to store initial stock prices."""
award_prices: dict[datetime.date, dict[str, Decimal]]
def get(self, date: datetime.date, symbol: str) -> Decimal:
"""Get initial stock price at given date."""
        # Award dates may go back a few days, depending on
# holidays or weekends, so we do a linear search
# in the past to find the award price
for i in range(7):
to_search = date - datetime.timedelta(days=i)
if (
to_search in self.award_prices
and symbol in self.award_prices[to_search]
):
return self.award_prices[to_search][symbol]
raise ExchangeRateMissingError(symbol, date)
def action_from_str(label: str) -> ActionType:
"""Convert string label to ActionType."""
if label == "Buy":
return ActionType.BUY
if label == "Sell":
return ActionType.SELL
if label in [
"MoneyLink Transfer",
"Misc Cash Entry",
"Service Fee",
"Wire Funds",
"Funds Received",
"Journal",
"Cash In Lieu",
]:
return ActionType.TRANSFER
if label == "Stock Plan Activity":
return ActionType.STOCK_ACTIVITY
if label in ["Qualified Dividend", "Cash Dividend"]:
return ActionType.DIVIDEND
if label in ["NRA Tax Adj", "NRA Withholding", "Foreign Tax Paid"]:
return ActionType.TAX
if label == "ADR Mgmt Fee":
return ActionType.FEE
if label in ["Adjustment", "IRS Withhold Adj"]:
return ActionType.ADJUSTMENT
if label in ["Short Term Cap Gain", "Long Term Cap Gain"]:
return ActionType.CAPITAL_GAIN
if label == "Spin-off":
return ActionType.SPIN_OFF
if label == "Credit Interest":
return ActionType.INTEREST
raise ParsingError("schwab transactions", f"Unknown action: {label}")
class SchwabTransaction(BrokerTransaction):
"""Represent single Schwab transaction."""
def __init__(
self,
row: list[str],
file: str,
):
"""Create transaction from CSV row."""
if len(row) != 9:
raise UnexpectedColumnCountError(row, 9, file)
if row[8] != "":
raise ParsingError(file, "Column 9 should be empty")
as_of_str = " as of "
if as_of_str in row[0]:
index = row[0].find(as_of_str) + len(as_of_str)
date_str = row[0][index:]
else:
date_str = row[0]
date = datetime.datetime.strptime(date_str, "%m/%d/%Y").date()
self.raw_action = row[1]
action = action_from_str(self.raw_action)
symbol = row[2] if row[2] != "" else None
description = row[3]
quantity = Decimal(row[4]) if row[4] != "" else None
price = Decimal(row[5].replace("$", "")) if row[5] != "" else None
fees = Decimal(row[6].replace("$", "")) if row[6] != "" else Decimal(0)
amount = Decimal(row[7].replace("$", "")) if row[7] != "" else None
currency = "USD"
broker = "Charles Schwab"
super().__init__(
date,
action,
symbol,
description,
quantity,
price,
fees,
amount,
currency,
broker,
)
@staticmethod
def create(
row: list[str], file: str, awards_prices: AwardPrices
) -> SchwabTransaction:
"""Create and post process a SchwabTransaction."""
transaction = SchwabTransaction(row, file)
if (
transaction.price is None
and transaction.action == ActionType.STOCK_ACTIVITY
):
symbol = transaction.symbol
if symbol is None:
raise SymbolMissingError(transaction)
transaction.price = awards_prices.get(transaction.date, symbol)
return transaction
def read_schwab_transactions(
transactions_file: str, schwab_award_transactions_file: str | None
) -> list[BrokerTransaction]:
"""Read Schwab transactions from file."""
awards_prices = _read_schwab_awards(schwab_award_transactions_file)
try:
with Path(transactions_file).open(encoding="utf-8") as csv_file:
lines = list(csv.reader(csv_file))
# Remove headers and footer
lines = lines[2:-1]
transactions = [
SchwabTransaction.create(row, transactions_file, awards_prices)
for row in lines
]
transactions.reverse()
return list(transactions)
except FileNotFoundError:
print(f"WARNING: Couldn't locate Schwab transactions file({transactions_file})")
return []
def _read_schwab_awards(
schwab_award_transactions_file: str | None,
) -> AwardPrices:
"""Read initial stock prices from CSV file."""
initial_prices: dict[datetime.date, dict[str, Decimal]] = defaultdict(dict)
lines = []
if schwab_award_transactions_file is not None:
try:
with Path(schwab_award_transactions_file).open(
encoding="utf-8"
) as csv_file:
lines = list(csv.reader(csv_file))
# Remove headers
lines = lines[2:]
except FileNotFoundError:
print(
"WARNING: Couldn't locate Schwab award "
f"file({schwab_award_transactions_file})"
)
else:
print("WARNING: No schwab award file provided")
modulo = len(lines) % 3
if modulo != 0:
raise UnexpectedRowCountError(
len(lines) - modulo + 3, schwab_award_transactions_file or ""
)
for row in zip(lines[::3], lines[1::3], lines[2::3]):
if len(row) != 3:
raise UnexpectedColumnCountError(
list(itertools.chain(*row)), 3, schwab_award_transactions_file or ""
)
lapse_main, _, lapse_data = row
if len(lapse_main) != 8:
raise UnexpectedColumnCountError(
lapse_main, 8, schwab_award_transactions_file or ""
)
if len(lapse_data) != 8:
raise UnexpectedColumnCountError(
                lapse_data, 8, schwab_award_transactions_file or ""
)
date_str = lapse_main[0]
date = datetime.datetime.strptime(date_str, "%Y/%m/%d").date()
symbol = lapse_main[2] if lapse_main[2] != "" else None
price = Decimal(lapse_data[3].replace("$", "")) if lapse_data[3] != "" else None
if symbol is not None and price is not None:
initial_prices[date][symbol] = price
return AwardPrices(award_prices=dict(initial_prices))
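# Illustrative sketch, not part of the parser: the awards CSV is consumed three
# rows at a time via strided zip, exactly as in _read_schwab_awards above. The
# toy rows below stand in for real export lines.
if __name__ == "__main__":
    toy_lines = [["a-main"], ["a-skip"], ["a-data"], ["b-main"], ["b-skip"], ["b-data"]]
    for lapse_main, _, lapse_data in zip(toy_lines[::3], toy_lines[1::3], toy_lines[2::3]):
        print(lapse_main[0], "->", lapse_data[0])  # a-main -> a-data, then b-main -> b-data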
| 32.013333
| 88
| 0.597945
|
6725d61a3855f7577f59bd2dc2056c63f50df8e0
| 3,124
|
py
|
Python
|
code/gan/mygan.py
|
T-tssxuan/machine_learning_step
|
113d0b1a2cacd2b03aa06c0c2bae7d65f05ac0ce
|
[
"MIT"
] | 7
|
2017-12-27T08:45:20.000Z
|
2020-07-15T11:51:32.000Z
|
code/gan/mygan.py
|
T-tssxuan/machine_learning_step
|
113d0b1a2cacd2b03aa06c0c2bae7d65f05ac0ce
|
[
"MIT"
] | null | null | null |
code/gan/mygan.py
|
T-tssxuan/machine_learning_step
|
113d0b1a2cacd2b03aa06c0c2bae7d65f05ac0ce
|
[
"MIT"
] | 3
|
2018-07-09T12:11:11.000Z
|
2020-06-28T13:57:08.000Z
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2)
return tf.random_normal(shape=size, stddev=xavier_stddev)
X = tf.placeholder(tf.float32, shape=[None, 784])
W1_d = tf.Variable(xavier_init([784, 128]))
B1_d = tf.Variable(xavier_init([128]))
W2_d = tf.Variable(xavier_init([128, 1]))
B2_d = tf.Variable(xavier_init([1]))
def discriminator(x):
h1 = tf.nn.relu(tf.matmul(x, W1_d) + B1_d)
logit = tf.matmul(h1, W2_d) + B2_d
prob = tf.nn.sigmoid(logit)
return prob, logit
W1_g = tf.Variable(xavier_init([100, 256]))
B1_g = tf.Variable(xavier_init([256]))
W2_g = tf.Variable(xavier_init([256, 784]))
B2_g = tf.Variable(xavier_init([784]))
def generator(z):
h1 = tf.nn.relu(tf.matmul(z, W1_g) + B1_g)
logit = tf.matmul(h1, W2_g) + B2_g
prob = tf.nn.sigmoid(logit)
return prob
Z = tf.placeholder(tf.float32, shape=[None, 100])
sample = generator(Z)
real_d, logit_real_d = discriminator(X)
gen_d, logit_gen_d = discriminator(sample)
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logit_real_d, labels=tf.ones_like(logit_real_d)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logit_gen_d, labels=tf.zeros_like(logit_gen_d)))
D_loss = D_loss_real + D_loss_fake
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logit_gen_d, labels=tf.ones_like(logit_gen_d)))
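# The three cross-entropy terms above are the standard non-saturating GAN
# objectives. With s(l) = sigmoid(l):
#   sigmoid_cross_entropy_with_logits(labels=1, logits=l) = -log(s(l))
#   sigmoid_cross_entropy_with_logits(labels=0, logits=l) = -log(1 - s(l))
# so D_loss estimates -E[log D(x)] - E[log(1 - D(G(z)))] and G_loss estimates
# -E[log D(G(z))].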
D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=[W1_d, B1_d, W2_d, B2_d])
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=[W1_g, B1_g, W2_g, B2_g])
batch_size = 128
Z_dim = 100
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
def plot(samples):
fig = plt.figure(figsize=(5, 5))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
return fig
def sample_Z(batch_size, Z_dim):
return np.random.uniform(-1, 1, size=(batch_size, Z_dim))
if not os.path.exists('vanilla/'):
os.mkdir('vanilla/')
i = 0
for it in range(100000):
if it % 1000 == 0:
samples = sess.run(sample, feed_dict={Z: sample_Z(16, Z_dim)})
fig = plot(samples)
plt.savefig('vanilla/mygan-{}.png'.format(str(i).zfill(3)), bbox_inches='tight')
i += 1
plt.close(fig)
X_batch, _ = mnist.train.next_batch(batch_size)
_, D_loss_cur = sess.run([D_solver, D_loss], feed_dict={X: X_batch, Z: sample_Z(batch_size, Z_dim)})
_, G_loss_cur = sess.run([G_solver, G_loss], feed_dict={Z: sample_Z(batch_size, Z_dim)})
if it % 1000 == 0:
print('Iter: {}, D_loss: {}, G_loss: {}'.format(it, D_loss_cur, G_loss_cur))
| 32.884211
| 125
| 0.693662
|
044a268ee23605a26f987412aded5ecc5675c175
| 962
|
py
|
Python
|
posts/models.py
|
mmanchev23/network
|
248a23089640096fe866abc4557e82383768b5bc
|
[
"MIT"
] | null | null | null |
posts/models.py
|
mmanchev23/network
|
248a23089640096fe866abc4557e82383768b5bc
|
[
"MIT"
] | null | null | null |
posts/models.py
|
mmanchev23/network
|
248a23089640096fe866abc4557e82383768b5bc
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
pass
class Profile(models.Model):
target = models.ForeignKey('User', on_delete=models.CASCADE, related_name='folowers')
follower = models.ForeignKey('User', on_delete=models.CASCADE, related_name='targets')
class Post(models.Model):
content = models.CharField(max_length=255)
user = models.ForeignKey('User', on_delete=models.CASCADE, related_name='author')
    date = models.DateTimeField(default=datetime.now)  # pass the callable so each post gets its own timestamp, not one frozen at import time
liked = models.ManyToManyField('User', default=None, blank=True, related_name='post_likes')
@property
def num_likes(self):
return self.liked.all().count()
class Like(models.Model):
user = models.ForeignKey('User', on_delete=models.CASCADE)
post = models.ForeignKey('Post', on_delete=models.CASCADE)
def __str__(self):
return str(self.post)
| 29.151515
| 95
| 0.72973
|
d87c076859b67b7a3efc53e2bacdc3b9c5f61080
| 3,965
|
py
|
Python
|
ml-agents/mlagents/trainers/tests/torch/test_sac.py
|
LoopySantos27/ml-agents
|
2b175c8ea65d75814654812e1357d15be8b40d3f
|
[
"Apache-2.0"
] | 13,653
|
2017-09-19T15:56:02.000Z
|
2022-03-31T18:55:07.000Z
|
ml-agents/mlagents/trainers/tests/torch/test_sac.py
|
LoopySantos27/ml-agents
|
2b175c8ea65d75814654812e1357d15be8b40d3f
|
[
"Apache-2.0"
] | 3,623
|
2017-09-20T02:50:20.000Z
|
2022-03-31T06:37:25.000Z
|
ml-agents/mlagents/trainers/tests/torch/test_sac.py
|
LoopySantos27/ml-agents
|
2b175c8ea65d75814654812e1357d15be8b40d3f
|
[
"Apache-2.0"
] | 4,130
|
2017-09-19T17:36:34.000Z
|
2022-03-31T12:54:55.000Z
|
import pytest
from mlagents.torch_utils import torch
from mlagents.trainers.buffer import BufferKey, RewardSignalUtil
from mlagents.trainers.sac.optimizer_torch import TorchSACOptimizer
from mlagents.trainers.policy.torch_policy import TorchPolicy
from mlagents.trainers.tests import mock_brain as mb
from mlagents.trainers.settings import NetworkSettings
from mlagents.trainers.tests.dummy_config import ( # noqa: F401
sac_dummy_config,
curiosity_dummy_config,
)
@pytest.fixture
def dummy_config():
return sac_dummy_config()
VECTOR_ACTION_SPACE = 2
VECTOR_OBS_SPACE = 8
DISCRETE_ACTION_SPACE = [3, 3, 3, 2]
BUFFER_INIT_SAMPLES = 64
NUM_AGENTS = 12
def create_sac_optimizer_mock(dummy_config, use_rnn, use_discrete, use_visual):
mock_brain = mb.setup_test_behavior_specs(
use_discrete,
use_visual,
vector_action_space=DISCRETE_ACTION_SPACE
if use_discrete
else VECTOR_ACTION_SPACE,
vector_obs_space=VECTOR_OBS_SPACE if not use_visual else 0,
)
trainer_settings = dummy_config
trainer_settings.network_settings.memory = (
NetworkSettings.MemorySettings(sequence_length=16, memory_size=12)
if use_rnn
else None
)
policy = TorchPolicy(0, mock_brain, trainer_settings)
optimizer = TorchSACOptimizer(policy, trainer_settings)
return optimizer
@pytest.mark.parametrize("discrete", [True, False], ids=["discrete", "continuous"])
@pytest.mark.parametrize("visual", [True, False], ids=["visual", "vector"])
@pytest.mark.parametrize("rnn", [True, False], ids=["rnn", "no_rnn"])
def test_sac_optimizer_update(dummy_config, rnn, visual, discrete):
torch.manual_seed(0)
# Test evaluate
optimizer = create_sac_optimizer_mock(
dummy_config, use_rnn=rnn, use_discrete=discrete, use_visual=visual
)
# Test update
update_buffer = mb.simulate_rollout(
BUFFER_INIT_SAMPLES, optimizer.policy.behavior_spec, memory_size=12
)
# Mock out reward signal eval
update_buffer[RewardSignalUtil.rewards_key("extrinsic")] = update_buffer[
BufferKey.ENVIRONMENT_REWARDS
]
# Mock out value memories
update_buffer[BufferKey.CRITIC_MEMORY] = update_buffer[BufferKey.MEMORY]
return_stats = optimizer.update(
update_buffer,
num_sequences=update_buffer.num_experiences // optimizer.policy.sequence_length,
)
# Make sure we have the right stats
required_stats = [
"Losses/Policy Loss",
"Losses/Value Loss",
"Losses/Q1 Loss",
"Losses/Q2 Loss",
"Policy/Continuous Entropy Coeff",
"Policy/Discrete Entropy Coeff",
"Policy/Learning Rate",
]
for stat in required_stats:
assert stat in return_stats.keys()
@pytest.mark.parametrize("discrete", [True, False], ids=["discrete", "continuous"])
def test_sac_update_reward_signals(
dummy_config, curiosity_dummy_config, discrete # noqa: F811
):
# Add a Curiosity module
dummy_config.reward_signals = curiosity_dummy_config
optimizer = create_sac_optimizer_mock(
dummy_config, use_rnn=False, use_discrete=discrete, use_visual=False
)
# Test update, while removing PPO-specific buffer elements.
update_buffer = mb.simulate_rollout(
BUFFER_INIT_SAMPLES, optimizer.policy.behavior_spec
)
# Mock out reward signal eval
update_buffer[RewardSignalUtil.rewards_key("extrinsic")] = update_buffer[
BufferKey.ENVIRONMENT_REWARDS
]
update_buffer[RewardSignalUtil.rewards_key("curiosity")] = update_buffer[
BufferKey.ENVIRONMENT_REWARDS
]
return_stats = optimizer.update_reward_signals(
{"curiosity": update_buffer}, num_sequences=update_buffer.num_experiences
)
required_stats = ["Losses/Curiosity Forward Loss", "Losses/Curiosity Inverse Loss"]
for stat in required_stats:
assert stat in return_stats.keys()
if __name__ == "__main__":
pytest.main()
| 34.181034
| 88
| 0.729887
|
5d29496bcbbfa93ae251ca14537f02c619db56ad
| 21,069
|
py
|
Python
|
skipgrammar/datasets/common.py
|
eifuentes/seqems
|
73b51e9eaa68b23b615d90d73e3f2184b9d783a0
|
[
"MIT"
] | null | null | null |
skipgrammar/datasets/common.py
|
eifuentes/seqems
|
73b51e9eaa68b23b615d90d73e3f2184b9d783a0
|
[
"MIT"
] | null | null | null |
skipgrammar/datasets/common.py
|
eifuentes/seqems
|
73b51e9eaa68b23b615d90d73e3f2184b9d783a0
|
[
"MIT"
] | 1
|
2021-09-17T22:27:15.000Z
|
2021-09-17T22:27:15.000Z
|
"""
Common utilities for datasets.
References:
- [Keras utils](https://github.com/keras-team/keras/tree/34231971fa47cb2477b357c1a368978de4128294/keras/utils)
- [Lenskit datasets](https://github.com/lenskit/lkpy/blob/master/lenskit/datasets.py)
"""
import collections
import hashlib
import logging
import os
import shutil
import sys
import tarfile
import time
import zipfile
from urllib.error import HTTPError, URLError
from urllib.request import urlretrieve
import numpy as np
import pandas as pd
from torch.utils.data import Dataset as MapDataset
from torch.utils.data import IterableDataset, RandomSampler, SequentialSampler
logger = logging.getLogger(__name__)
class Progbar(object):
"""Displays a progress bar.
# Arguments
target: Total number of steps expected, None if unknown.
width: Progress bar width on screen.
verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over time. Metrics in this list
will be displayed as-is. All others will be averaged
by the progbar before display.
interval: Minimum visual progress update interval (in seconds).
"""
def __init__(
self, target, width=30, verbose=1, interval=0.05, stateful_metrics=None
):
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
if stateful_metrics:
self.stateful_metrics = set(stateful_metrics)
else:
self.stateful_metrics = set()
self._dynamic_display = (
hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
) or "ipykernel" in sys.modules
self._total_width = 0
self._seen_so_far = 0
self._values = collections.OrderedDict()
self._start = time.time()
self._last_update = 0
def update(self, current, values=None):
"""Updates the progress bar.
# Arguments
current: Index of current step.
values: List of tuples:
`(name, value_for_last_step)`.
If `name` is in `stateful_metrics`,
`value_for_last_step` will be displayed as-is.
Else, an average of the metric over time will be displayed.
"""
values = values or []
for k, v in values:
if k not in self.stateful_metrics:
if k not in self._values:
self._values[k] = [
v * (current - self._seen_so_far),
current - self._seen_so_far,
]
else:
self._values[k][0] += v * (current - self._seen_so_far)
self._values[k][1] += current - self._seen_so_far
else:
# Stateful metrics output a numeric value. This representation
# means "take an average from a single value" but keeps the
# numeric formatting.
self._values[k] = [v, 1]
self._seen_so_far = current
now = time.time()
info = " - %.0fs" % (now - self._start)
if self.verbose == 1:
if (
now - self._last_update < self.interval
and self.target is not None
and current < self.target
):
return
prev_total_width = self._total_width
if self._dynamic_display:
sys.stdout.write("\b" * prev_total_width)
sys.stdout.write("\r")
else:
sys.stdout.write("\n")
if self.target is not None:
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = "%%%dd/%d [" % (numdigits, self.target)
bar = barstr % current
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += "=" * (prog_width - 1)
if current < self.target:
bar += ">"
else:
bar += "="
bar += "." * (self.width - prog_width)
bar += "]"
else:
bar = "%7d/Unknown" % current
self._total_width = len(bar)
sys.stdout.write(bar)
if current:
time_per_unit = (now - self._start) / current
else:
time_per_unit = 0
if self.target is not None and current < self.target:
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = "%d:%02d:%02d" % (
eta // 3600,
(eta % 3600) // 60,
eta % 60,
)
elif eta > 60:
eta_format = "%d:%02d" % (eta // 60, eta % 60)
else:
eta_format = "%ds" % eta
info = " - ETA: %s" % eta_format
else:
if time_per_unit >= 1:
info += " %.0fs/step" % time_per_unit
elif time_per_unit >= 1e-3:
info += " %.0fms/step" % (time_per_unit * 1e3)
else:
info += " %.0fus/step" % (time_per_unit * 1e6)
for k in self._values:
info += " - %s:" % k
if isinstance(self._values[k], list):
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if abs(avg) > 1e-3:
info += " %.4f" % avg
else:
info += " %.4e" % avg
else:
info += " %s" % self._values[k]
self._total_width += len(info)
if prev_total_width > self._total_width:
info += " " * (prev_total_width - self._total_width)
if self.target is not None and current >= self.target:
info += "\n"
sys.stdout.write(info)
sys.stdout.flush()
elif self.verbose == 2:
if self.target is None or current >= self.target:
for k in self._values:
info += " - %s:" % k
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if avg > 1e-3:
info += " %.4f" % avg
else:
info += " %.4e" % avg
info += "\n"
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
def add(self, n, values=None):
self.update(self._seen_so_far + n, values)
def _extract_archive(file_path, path=".", archive_format="auto"):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
# Arguments
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
# Returns
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == "auto":
archive_format = ["tar", "zip"]
if isinstance(archive_format, str):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type == "tar":
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == "zip":
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
def _hash_file(fpath, algorithm="sha256", chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
# Example
```python
>>> from keras.data_utils import _hash_file
>>> _hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
# Arguments
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
The file hash
"""
if (algorithm == "sha256") or (algorithm == "auto" and len(hash) == 64):
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, "rb") as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b""):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm="auto", chunk_size=65535):
"""Validates a file against a sha256 or md5 hash.
# Arguments
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
Whether the file is valid
"""
if (algorithm == "sha256") or (algorithm == "auto" and len(file_hash) == 64):
hasher = "sha256"
else:
hasher = "md5"
if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
def get_file(
fname,
origin,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir="datasets",
hash_algorithm="auto",
extract=False,
archive_format="auto",
cache_dir=None,
):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.skipgrammar`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.skipgrammar/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
# Arguments
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
origin: Original URL of the file.
untar: Deprecated in favor of 'extract'.
boolean, whether the file should be decompressed
md5_hash: Deprecated in favor of 'file_hash'.
md5 hash of the file for verification
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_subdir: Subdirectory under the Seqrep cache dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
hash_algorithm: Select the hash algorithm to verify the file.
options are 'md5', 'sha256', and 'auto'.
The default 'auto' detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
cache_dir: Location to store cached files, when None it
defaults to the Seqrep directory.
# Returns
Path to the downloaded file
""" # noqa
if cache_dir is None:
if "SEQREP_HOME" in os.environ:
cache_dir = os.environ.get("SEQREP_HOME")
else:
if os.access(os.path.expanduser("~"), os.W_OK):
os.makedirs(
os.path.join(os.path.expanduser("~"), ".skipgrammar"), exist_ok=True
)
cache_dir = os.path.join(os.path.expanduser("~"), ".skipgrammar")
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = "md5"
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join("/tmp", ".skipgrammar")
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + ".tar.gz"
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print(
"A local file was found, but it seems to be "
"incomplete or outdated because the "
+ hash_algorithm
+ " file hash does not match the original value of "
+ file_hash
+ " so we will re-download the data."
)
download = True
else:
download = True
if download:
print("Downloading data from", origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = "URL fetch failure on {} : {} -- {}"
try:
try:
urlretrieve(origin, fpath, dl_progress)
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format="tar")
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
def cached(prop):
cache = "_cached_" + prop.__name__
def getter(self):
val = getattr(self, cache, None)
if val is None:
val = prop(self)
setattr(self, cache, val)
return val
getter.__doc__ = prop.__doc__
return property(getter)
class UserItemMapDataset(MapDataset):
def __init__(
self,
user_item_df,
max_window_size_lr=10,
max_sequence_length=20,
user_col="user",
sort_col="timestamp",
item_col="id",
session_col=None,
):
super().__init__()
# populate anchors and targets
self.anchors, self.targets = UserItemMapDataset.to_anchors_targets(
user_item_df,
max_window_size_lr=max_window_size_lr,
max_sequence_length=max_sequence_length,
user_col=user_col,
sort_col=sort_col,
item_col=item_col,
)
def __len__(self):
return len(self.anchors)
def __getitem__(self, index):
return self.anchors[index], self.targets[index]
@staticmethod
def get_target_items(sequence, anchor_index, window_size=2):
rand_num_items_lr = np.random.randint(1, window_size + 1)
start = (
anchor_index - rand_num_items_lr
if (anchor_index - rand_num_items_lr) > 0
else 0
)
stop = anchor_index + rand_num_items_lr
target_items = (
sequence[start:anchor_index] + sequence[anchor_index + 1 : stop + 1]
)
return list(target_items)
@staticmethod
def to_anchors_targets(
user_item_df,
max_window_size_lr=10,
max_sequence_length=20,
user_col="user",
sort_col="timestamp",
item_col="id",
session_col=None,
):
anchors, targets = list(), list()
iter_upper_bound = max_sequence_length - max_window_size_lr
groupbycols = [user_col, session_col] if session_col else [user_col]
for user_id, user_df in user_item_df.sort_values(
[user_col, sort_col], ascending=True
).groupby(groupbycols):
id_sequence = user_df[item_col].tolist()
id_sequence.reverse() # most recent first
id_sequence = (
id_sequence[:max_sequence_length]
if len(id_sequence) > max_sequence_length
else id_sequence
)
for anchor_index in range(0, min(iter_upper_bound, len(id_sequence))):
_targets = UserItemMapDataset.get_target_items(
id_sequence, anchor_index, window_size=max_window_size_lr
) # stochastic method
_anchors = [id_sequence[anchor_index]] * len(_targets)
anchors += _anchors
targets += _targets
return anchors, targets
class UserItemIterableDataset(IterableDataset):
def __init__(
self,
user_item_df,
max_window_size_lr=10,
max_sequence_length=20,
user_col="user",
sort_col="timestamp",
item_col="id",
session_timedelta="800s",
shuffle=True,
):
super().__init__()
self.df = user_item_df
self.max_sequence_length = max_sequence_length
self.max_window_size_lr = max_window_size_lr
self.user_col = user_col
self.sort_col = sort_col
self.item_col = item_col
self.shuffle = shuffle
self.dataset = None
# session identification
self.sessions(timedelta=session_timedelta)
def __iter__(self):
self.dataset = UserItemMapDataset(
self.df,
max_window_size_lr=self.max_window_size_lr,
max_sequence_length=self.max_sequence_length,
user_col=self.user_col,
sort_col=self.sort_col,
item_col=self.item_col,
session_col="session_nbr",
)
if self.shuffle:
sampler = RandomSampler(self.dataset)
else:
sampler = SequentialSampler(self.dataset)
logger.debug(f"built stochastic skip-gram dataset n=({len(self.dataset):,})")
for rand_index in sampler:
yield self.dataset[rand_index]
def sessions(self, timedelta="800s"):
self.df["session_end"] = (
self.df.sort_values([self.user_col, self.sort_col], ascending=True)
.groupby(self.user_col)[self.sort_col]
.diff(periods=1)
> pd.Timedelta(timedelta)
).astype(int)
self.df["session_nbr"] = self.df.groupby(self.user_col).session_end.cumsum() + 1
@staticmethod
def item_frequencies(df, item_col="id"):
item_cnts = df[item_col].value_counts()
total = item_cnts.sum()
item_freq = (item_cnts / total).sort_index()
return item_freq
@staticmethod
def subsample(df, item_col="id", thresh=1e-5):
item_freq = UserItemIterableDataset.item_frequencies(df, item_col)
discard_dist = 1 - np.sqrt(thresh / item_freq)
subsampled = discard_dist.loc[
(1 - discard_dist) > np.random.random(size=len(discard_dist))
]
return subsampled.index.tolist()
@staticmethod
def item_distribution(df, item_col="id", p=0.75):
item_freq = UserItemIterableDataset.item_frequencies(df, item_col)
item_dist = (item_freq ** (p)) / np.sum(item_freq ** (p))
return item_dist
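# Illustrative sketch, not part of the library: replays the left/right window
# slicing from UserItemMapDataset.get_target_items on a toy sequence, using a
# fixed window instead of the random draw made above.
if __name__ == "__main__":
    sequence = ["a", "b", "c", "d", "e"]
    anchor_index, window = 2, 1
    start = max(anchor_index - window, 0)
    stop = anchor_index + window
    targets = sequence[start:anchor_index] + sequence[anchor_index + 1:stop + 1]
    print(sequence[anchor_index], "->", targets)  # c -> ['b', 'd']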
| 36.138937
| 110
| 0.568323
|
3cd9d390b25ff5f54acd42dcde90eab9f963ea7a
| 1,085
|
py
|
Python
|
registry/migrations/0001_initial.py
|
jhonssegura/django-issuing-system
|
5b8ceb177edbe5a966e41a618c14b425d653ba0c
|
[
"Apache-2.0"
] | null | null | null |
registry/migrations/0001_initial.py
|
jhonssegura/django-issuing-system
|
5b8ceb177edbe5a966e41a618c14b425d653ba0c
|
[
"Apache-2.0"
] | null | null | null |
registry/migrations/0001_initial.py
|
jhonssegura/django-issuing-system
|
5b8ceb177edbe5a966e41a618c14b425d653ba0c
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 4.0 on 2021-12-18 20:19
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('event_cod', models.CharField(max_length=200, unique=True, verbose_name='Codigo Identificacion')),
('event_nom', models.CharField(max_length=200, verbose_name='Nombre del Evento')),
('event_date_init', models.DateField(blank=True, null=True, verbose_name='Fecha de Inicio')),
('even_date_end', models.DateField(blank=True, null=True, verbose_name='Fecha de Finalizacion')),
('event_site', models.TextField(blank=True, max_length=500, null=True, verbose_name='Lugar del Evento')),
('event_url', models.URLField(blank=True, null=True, verbose_name='Pagina Web')),
],
),
]
| 40.185185
| 121
| 0.62765
|
32c73d64227e70662fed045d2fbdc56d7cfea9af
| 4,042
|
py
|
Python
|
annolid/data/augmentation.py
|
jeremyforest/annolid
|
88bb528ee5a39a84a08631b27b934191a8822048
|
[
"MIT"
] | 9
|
2020-09-12T00:12:06.000Z
|
2022-03-25T19:16:17.000Z
|
annolid/data/augmentation.py
|
jeremyforest/annolid
|
88bb528ee5a39a84a08631b27b934191a8822048
|
[
"MIT"
] | 34
|
2020-09-07T20:25:46.000Z
|
2022-02-28T19:36:54.000Z
|
annolid/data/augmentation.py
|
jeremyforest/annolid
|
88bb528ee5a39a84a08631b27b934191a8822048
|
[
"MIT"
] | 3
|
2020-09-18T03:06:41.000Z
|
2021-10-14T22:52:49.000Z
|
import imageio
import json
from pathlib import Path
import pandas as pd
import collections
import numpy as np
from imgaug import augmenters as iaa
import imgaug as ia
from imgaug.augmentables.polys import Polygon, PolygonsOnImage
class LabelStats:
def __init__(self, anno_dir):
self.anno_dir = anno_dir
self.instance_counter = collections.defaultdict(int)
def count(self):
label_files = Path(self.anno_dir).glob("*.json")
for lf in label_files:
label_file = json.loads(lf.read_bytes())
for shape in label_file['shapes']:
label = shape['label']
self.instance_counter[label] += 1
return self.instance_counter
def to_table(self):
count_dict = self.count()
        df = pd.DataFrame(
            dict(count_dict).items(),
            columns=['instance_name', 'counts']
        )
return df.sort_values(by="counts")
class Augmentation(LabelStats):
def __init__(self, anno_dir):
ia.seed(4)
self.anno_dir = anno_dir
super(Augmentation, self).__init__(anno_dir)
self.df = self.to_table()
self.df.reset_index(drop=True, inplace=True)
self.augment_list = []
def augment(self):
few_instances = self.df[self.df['counts'] < self.df['counts'].median()]
aug_list = list(few_instances['instance_name'])
label_files = Path(self.anno_dir).glob("*.json")
aug = iaa.Sequential([
iaa.AdditiveGaussianNoise(scale=10),
# The following transformations will change the polygon
# iaa.Affine(rotate=(-0.05, 0.05), translate_percent=(-0.05, 0.05), scale=(0.8, 1.2),
# mode=["constant", "edge"], cval=0),
# iaa.CoarseDropout(0.1,size_px=8),
# iaa.Fliplr(0.5),
#iaa.PerspectiveTransform((0.01, 0.01)),
#iaa.LinearContrast((0.8, 1.2), per_channel=0.5),
iaa.Sometimes(0.05, iaa.Snowflakes()),
iaa.AddToHueAndSaturation((-50, 50)),
])
for lf in label_files:
label_file = json.loads(lf.read_bytes())
img_path = lf.with_suffix('.jpg')
img = imageio.imread(img_path)
image_polys = np.copy(img)
polys = []
is_aug = False
aug_dir = img_path.parent.parent / (img_path.parent.stem + '_aug')
aug_dir.mkdir(exist_ok=True)
for i, shape in enumerate(label_file['shapes']):
label = shape['label']
if label in aug_list:
is_aug = True
points = shape['points']
polygon = Polygon(points, [label])
psoi = ia.PolygonsOnImage(
[polygon], shape=image_polys.shape)
instance_counts_median = self.df['counts'].median()
instance_counts = (
self.df[self.df['instance_name'] == label]['counts'].values[0])
for j in range(int(instance_counts_median - instance_counts)):
aug_img, psoi_aug = aug(
image=image_polys, polygons=psoi)
aug_img_path = aug_dir / \
(img_path.stem + f'_{j}_aug.jpg')
aug_json_path = aug_img_path.with_suffix('.json')
aug_points = psoi_aug.polygons[0].exterior
imageio.imsave(aug_img_path, aug_img, '.jpg')
label_file["imageData"] = None
label_file['imagePath'] = aug_img_path.name
with open(aug_json_path, "w") as f:
json.dump(label_file, f,
ensure_ascii=False, indent=2)
label_file['shapes'][i]['points'] = aug_points.tolist()
self.augment_list.append(lf)
return set(self.augment_list)
| 37.775701
| 97
| 0.540327
|
258fab533f732784b24fd33c973697688ca0c86a
| 1,857
|
py
|
Python
|
example.py
|
fgaim/artext
|
88d8a27180aabe949eb18c287100de95d894fe53
|
[
"MIT"
] | 3
|
2018-11-23T19:37:18.000Z
|
2020-07-26T16:51:08.000Z
|
example.py
|
fgaim/artext
|
88d8a27180aabe949eb18c287100de95d894fe53
|
[
"MIT"
] | 1
|
2020-03-08T18:59:17.000Z
|
2020-03-08T18:59:17.000Z
|
example.py
|
fgaim/artext
|
88d8a27180aabe949eb18c287100de95d894fe53
|
[
"MIT"
] | null | null | null |
import sys
from artext import config, utils, Artext
if __name__ == "__main__":
parser = utils.arg_parser()
args = parser.parse_args('-src test -out test -n 5'.split() + sys.argv[1:])
conf = config.Config()
conf.error_rate = args.error_rate
conf.path_protected_tokens = args.protected_tokens
conf.samples = args.samples
conf.separator = args.separator
artxt = Artext(config=conf)
# Sentence Level
print('Sentence Level')
sent = "So, I think if we have to go somewhere on foot, we must put on our hat."
learner = "So, I think if we have to go somewhere on foot, we must put our hat."
print('Input (Lang-8 target):\n{}\n'.format(sent))
print('Human (Lang-8 source):\n{}\n'.format(learner))
noises = artxt.noise_sentence(sent)
print('Artext:')
for noise in noises:
print('-', noise)
# Document Level
print('\nDocument Level')
doc = """This morning I found out that one of my favourite bands released a new album.
I already forgot about Rise Against and it is a great surprise for me, because I haven't listened to them for 2 years.
I hope this band didn't become worse, like many others big ones did, and I 'll enjoy listening to it.
Well, I just have to get it and check it out."""
learner = """This morning I found out that one of my favourite band released his new album.
I already forgot about Rise Against an it is a great surprise for me, because I didn't return to them for 2 years.
I hope this band did n't become worse yet like many others big ones and I'll enjoy listening it.
Well , there remains to get it and check it out."""
print('Input (Lang-8 target):\n{}\n'.format(doc))
print('Human (Lang-8 source):\n{}\n'.format(learner))
noises = artxt.noise_document(doc)
print('Artext:')
for noise in noises:
print('-', noise)
| 41.266667
| 118
| 0.683899
|
7a70ca90c80fc5a2ba9d9cedad86d6f25d3b6cdf
| 1,112
|
py
|
Python
|
share/rpcauth/rpcauth.py
|
1515295/Iridium
|
694c388b85680629364f683d8dd401c68975b900
|
[
"MIT"
] | null | null | null |
share/rpcauth/rpcauth.py
|
1515295/Iridium
|
694c388b85680629364f683d8dd401c68975b900
|
[
"MIT"
] | null | null | null |
share/rpcauth/rpcauth.py
|
1515295/Iridium
|
694c388b85680629364f683d8dd401c68975b900
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
sys.stderr.write('Please include username as an argument.\n')
sys.exit(0)
username = sys.argv[1]
#This uses os.urandom() underneath
cryptogen = SystemRandom()
#Create 16 byte hex salt
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
hexseq = list(map(hex, salt_sequence))
salt = "".join([x[2:] for x in hexseq])
#Create 32 byte b64 password
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
password = password.decode('utf-8')
digestmod = 'SHA256'
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()
print("String to be appended to iridium.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
| 26.47619
| 79
| 0.728417
|
0a1915ce3a21366fe733a220393b2121d0069398
| 5,662
|
py
|
Python
|
openaerostruct/tests/test_aero_analysis_Sref.py
|
carlosferpereira/OpenAeroStruct
|
35e1ff8aac5c67e40b1925829cfbc203ba1b2f2d
|
[
"Apache-2.0"
] | null | null | null |
openaerostruct/tests/test_aero_analysis_Sref.py
|
carlosferpereira/OpenAeroStruct
|
35e1ff8aac5c67e40b1925829cfbc203ba1b2f2d
|
[
"Apache-2.0"
] | null | null | null |
openaerostruct/tests/test_aero_analysis_Sref.py
|
carlosferpereira/OpenAeroStruct
|
35e1ff8aac5c67e40b1925829cfbc203ba1b2f2d
|
[
"Apache-2.0"
] | 1
|
2021-04-09T16:45:27.000Z
|
2021-04-09T16:45:27.000Z
|
from openmdao.utils.assert_utils import assert_rel_error
import unittest
import numpy as np
from openaerostruct.geometry.utils import generate_mesh
from openaerostruct.geometry.geometry_group import Geometry
from openaerostruct.aerodynamics.aero_groups import AeroPoint
import openmdao.api as om
class Test(unittest.TestCase):
def test(self):
# Create a dictionary to store options about the surface
mesh_dict = {'num_y' : 7,
'num_x' : 3,
'wing_type' : 'CRM',
'symmetry' : True,
'num_twist_cp' : 5}
mesh, twist_cp = generate_mesh(mesh_dict)
surf_dict = {
# Wing definition
'name' : 'wing', # name of the surface
'symmetry' : True, # if true, model one half of wing
# reflected across the plane y = 0
'S_ref_type' : 'wetted', # how we compute the wing area,
# can be 'wetted' or 'projected'
'fem_model_type' : 'tube',
'twist_cp' : twist_cp,
'mesh' : mesh,
# Aerodynamic performance of the lifting surface at
# an angle of attack of 0 (alpha=0).
# These CL0 and CD0 values are added to the CL and CD
# obtained from aerodynamic analysis of the surface to get
# the total CL and CD.
# These CL0 and CD0 values do not vary wrt alpha.
'CL0' : 0.0, # CL of the surface at alpha=0
'CD0' : 0.015, # CD of the surface at alpha=0
# Airfoil properties for viscous drag calculation
'k_lam' : 0.05, # percentage of chord with laminar
# flow, used for viscous drag
't_over_c_cp' : np.array([0.15]), # thickness over chord ratio (NACA0015)
'c_max_t' : .303, # chordwise location of maximum (NACA0015)
# thickness
'with_viscous' : True, # if true, compute viscous drag
'with_wave' : False, # if true, compute wave drag
}
surfaces = [surf_dict]
# Create the problem and the model group
prob = om.Problem()
indep_var_comp = om.IndepVarComp()
indep_var_comp.add_output('v', val=248.136, units='m/s')
indep_var_comp.add_output('alpha', val=5., units='deg')
indep_var_comp.add_output('Mach_number', val=0.84)
indep_var_comp.add_output('re', val=1.e6, units='1/m')
indep_var_comp.add_output('rho', val=0.38, units='kg/m**3')
indep_var_comp.add_output('cg', val=np.zeros((3)), units='m')
indep_var_comp.add_output('S_ref_total', val=150.0, units='m**2')
prob.model.add_subsystem('prob_vars',
indep_var_comp,
promotes=['*'])
# Loop over each surface in the surfaces list
for surface in surfaces:
geom_group = Geometry(surface=surface)
# Add tmp_group to the problem as the name of the surface.
# Note that is a group and performance group for each
# individual surface.
prob.model.add_subsystem(surface['name'], geom_group)
# Loop through and add a certain number of aero points
for i in range(1):
# Create the aero point group and add it to the model
aero_group = AeroPoint(surfaces=surfaces, user_specified_Sref=True)
point_name = 'aero_point_{}'.format(i)
prob.model.add_subsystem(point_name, aero_group)
# Connect flow properties to the analysis point
prob.model.connect('v', point_name + '.v')
prob.model.connect('alpha', point_name + '.alpha')
prob.model.connect('Mach_number', point_name + '.Mach_number')
prob.model.connect('re', point_name + '.re')
prob.model.connect('rho', point_name + '.rho')
prob.model.connect('cg', point_name + '.cg')
prob.model.connect('S_ref_total', point_name + '.S_ref_total')
# Connect the parameters within the model for each aero point
for surface in surfaces:
name = surface['name']
# Connect the mesh from the geometry component to the analysis point
prob.model.connect(name + '.mesh', point_name + '.' + name + '.def_mesh')
# Perform the connections with the modified names within the
# 'aero_states' group.
prob.model.connect(name + '.mesh', point_name + '.aero_states.' + name + '_def_mesh')
prob.model.connect(name + '.t_over_c', point_name + '.' + name + '_perf.' + 't_over_c')
# recorder = om.SqliteRecorder("aero_analysis.db")
# prob.driver.add_recorder(recorder)
# prob.driver.recording_options['record_derivatives'] = True
# prob.driver.recording_options['includes'] = ['*']
# Set up the problem
prob.setup()
# om.view_model(prob)
prob.run_driver()
assert_rel_error(self, prob['aero_point_0.CD'][0], 0.10534816690971655, 1e-6)
assert_rel_error(self, prob['aero_point_0.CL'][0], 1.4158238516533308, 1e-6)
assert_rel_error(self, prob['aero_point_0.CM'][1], -4.806188698195504, 1e-6)
if __name__ == '__main__':
unittest.main()
| 41.940741
| 103
| 0.557753
|
aad19ee81c99fd8c11f864ff05b67c8cfc3e4a57
| 11,299
|
py
|
Python
|
nuplan/common/maps/nuplan_map/test/test_nuplan_map.py
|
motional/nuplan-devkit
|
e39029e788b17f47f2fcadb774098ef8fbdd0d67
|
[
"Apache-2.0"
] | 128
|
2021-12-06T15:41:14.000Z
|
2022-03-29T13:16:32.000Z
|
nuplan/common/maps/nuplan_map/test/test_nuplan_map.py
|
motional/nuplan-devkit
|
e39029e788b17f47f2fcadb774098ef8fbdd0d67
|
[
"Apache-2.0"
] | 28
|
2021-12-11T08:11:31.000Z
|
2022-03-25T02:35:43.000Z
|
nuplan/common/maps/nuplan_map/test/test_nuplan_map.py
|
motional/nuplan-devkit
|
e39029e788b17f47f2fcadb774098ef8fbdd0d67
|
[
"Apache-2.0"
] | 14
|
2021-12-11T04:12:26.000Z
|
2022-03-24T06:38:30.000Z
|
from typing import Any, Dict
import numpy as np
import pytest
from nuplan.common.actor_state.state_representation import Point2D
from nuplan.common.maps.abstract_map import SemanticMapLayer
from nuplan.common.maps.abstract_map_objects import Lane
from nuplan.common.maps.nuplan_map.map_factory import NuPlanMapFactory
from nuplan.common.maps.test_utils import add_map_objects_to_scene
from nuplan.common.utils.testing.nuplan_test import NUPLAN_TEST_PLUGIN, nuplan_test
from nuplan.database.tests.nuplan_db_test_utils import get_test_maps_db
maps_db = get_test_maps_db()
map_factory = NuPlanMapFactory(maps_db)
@nuplan_test(path='json/baseline/baseline_in_lane.json')
def test_is_in_layer_lane(scene: Dict[str, Any]) -> None:
"""
Test is in lane.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
for marker in scene["markers"]:
pose = marker["pose"]
assert nuplan_map.is_in_layer(Point2D(pose[0], pose[1]), SemanticMapLayer.LANE)
@nuplan_test(path='json/baseline/baseline_in_intersection.json')
def test_is_in_layer_intersection(scene: Dict[str, Any]) -> None:
"""
Test is in intersection.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
for marker in scene["markers"]:
pose = marker["pose"]
assert nuplan_map.is_in_layer(Point2D(pose[0], pose[1]), SemanticMapLayer.INTERSECTION)
@nuplan_test(path='json/baseline/baseline_in_lane.json')
def test_get_lane(scene: Dict[str, Any]) -> None:
"""
Test getting one lane.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
for marker, expected_speed_limit in zip(scene["markers"], scene["xtr"]["expected_speed_limit"]):
pose = marker["pose"]
point = Point2D(pose[0], pose[1])
lane = nuplan_map.get_one_map_object(point, SemanticMapLayer.LANE)
assert lane is not None
assert lane.contains_point(point)
assert lane.speed_limit_mps == pytest.approx(expected_speed_limit)
add_map_objects_to_scene(scene, [lane])
@nuplan_test(path='json/baseline/no_baseline.json')
def test_no_baseline(scene: Dict[str, Any]) -> None:
"""
Test when there is no baseline.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
for marker in scene["markers"]:
pose = marker["pose"]
lane: Lane = nuplan_map.get_one_map_object(Point2D(pose[0], pose[1]), SemanticMapLayer.LANE)
assert lane is None
lane_connector = nuplan_map.get_all_map_objects(Point2D(pose[0], pose[1]), SemanticMapLayer.LANE_CONNECTOR)
assert not lane_connector
@nuplan_test(path='json/baseline/baseline_in_intersection.json')
def test_get_lane_connector(scene: Dict[str, Any]) -> None:
"""
Test getting lane connectors.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
idx = 0
for marker in scene["markers"]:
pose = marker["pose"]
point = Point2D(pose[0], pose[1])
lane_connectors = nuplan_map.get_all_map_objects(point, SemanticMapLayer.LANE_CONNECTOR)
assert lane_connectors is not None
add_map_objects_to_scene(scene, lane_connectors)
for lane_connector in lane_connectors:
assert lane_connector.contains_point(point)
assert lane_connector.speed_limit_mps == pytest.approx(scene["xtr"]["expected_speed_limit"][idx])
idx += 1
pose = scene["markers"][0]["pose"]
with pytest.raises(AssertionError):
nuplan_map.get_one_map_object(Point2D(pose[0], pose[1]), SemanticMapLayer.LANE_CONNECTOR)
@nuplan_test(path='json/get_nearest/lane.json')
def test_get_nearest_lane(scene: Dict[str, Any]) -> None:
"""
Test getting nearest lane.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
for marker, expected_distance, expected_id in zip(
scene["markers"], scene["xtr"]["expected_nearest_distance"], scene["xtr"]["expected_nearest_id"]
):
pose = marker["pose"]
lane_id, distance = nuplan_map.get_distance_to_nearest_map_object(
Point2D(pose[0], pose[1]), SemanticMapLayer.LANE
)
assert lane_id == expected_id
assert distance == expected_distance
lane = nuplan_map.get_map_object(str(lane_id), SemanticMapLayer.LANE)
add_map_objects_to_scene(scene, [lane])
@nuplan_test(path='json/get_nearest/lane_connector.json')
def test_get_nearest_lane_connector(scene: Dict[str, Any]) -> None:
"""
Test getting nearest lane connector.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
for marker, expected_distance, expected_id in zip(
scene["markers"], scene["xtr"]["expected_nearest_distance"], scene["xtr"]["expected_nearest_id"]
):
pose = marker["pose"]
lane_connector_id, distance = nuplan_map.get_distance_to_nearest_map_object(
Point2D(pose[0], pose[1]), SemanticMapLayer.LANE_CONNECTOR
)
# TODO: restore checks
# assert lane_connector_id != -1
# assert distance != np.NaN
lane_connector = nuplan_map.get_map_object(str(lane_connector_id), SemanticMapLayer.LANE_CONNECTOR)
add_map_objects_to_scene(scene, [lane_connector])
@nuplan_test(path='json/baseline/baseline_in_lane.json')
def test_get_roadblock(scene: Dict[str, Any]) -> None:
"""
Test getting one roadblock.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
for marker in scene["markers"]:
pose = marker["pose"]
point = Point2D(pose[0], pose[1])
roadblock = nuplan_map.get_one_map_object(point, SemanticMapLayer.ROADBLOCK)
assert roadblock is not None
assert roadblock.contains_point(point)
add_map_objects_to_scene(scene, [roadblock])
@nuplan_test(path='json/baseline/baseline_in_intersection.json')
def test_get_roadblock_connector(scene: Dict[str, Any]) -> None:
"""
Test getting roadblock connectors.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
for marker in scene["markers"]:
pose = marker["pose"]
point = Point2D(pose[0], pose[1])
roadblock_connectors = nuplan_map.get_all_map_objects(point, SemanticMapLayer.ROADBLOCK_CONNECTOR)
assert roadblock_connectors is not None
add_map_objects_to_scene(scene, roadblock_connectors)
for roadblock_connector in roadblock_connectors:
assert roadblock_connector.contains_point(point)
pose = scene["markers"][0]["pose"]
with pytest.raises(AssertionError):
nuplan_map.get_one_map_object(Point2D(pose[0], pose[1]), SemanticMapLayer.ROADBLOCK_CONNECTOR)
@nuplan_test(path='json/get_nearest/lane.json')
def test_get_nearest_roadblock(scene: Dict[str, Any]) -> None:
"""
Test getting nearest roadblock.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
for marker in scene["markers"]:
pose = marker["pose"]
roadblock_id, distance = nuplan_map.get_distance_to_nearest_map_object(
Point2D(pose[0], pose[1]), SemanticMapLayer.ROADBLOCK
)
roadblock = nuplan_map.get_map_object(str(roadblock_id), SemanticMapLayer.ROADBLOCK)
assert roadblock_id
add_map_objects_to_scene(scene, [roadblock])
@nuplan_test(path='json/get_nearest/lane_connector.json')
def test_get_nearest_roadblock_connector(scene: Dict[str, Any]) -> None:
"""
Test getting nearest roadblock connector.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
for marker in scene["markers"]:
pose = marker["pose"]
roadblock_connector_id, distance = nuplan_map.get_distance_to_nearest_map_object(
Point2D(pose[0], pose[1]), SemanticMapLayer.ROADBLOCK_CONNECTOR
)
assert roadblock_connector_id != -1
assert distance != np.NaN
roadblock_connector = nuplan_map.get_map_object(
str(roadblock_connector_id), SemanticMapLayer.ROADBLOCK_CONNECTOR
)
assert roadblock_connector
add_map_objects_to_scene(scene, [roadblock_connector])
@nuplan_test(path='json/neighboring/all_map_objects.json')
def test_get_proximal_map_objects(scene: Dict[str, Any]) -> None:
"""
Test get_neighbor_lanes.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
marker = scene["markers"][0]
pose = marker["pose"]
map_objects = nuplan_map.get_proximal_map_objects(
Point2D(pose[0], pose[1]),
40,
[
SemanticMapLayer.LANE,
SemanticMapLayer.LANE_CONNECTOR,
SemanticMapLayer.ROADBLOCK,
SemanticMapLayer.ROADBLOCK_CONNECTOR,
SemanticMapLayer.STOP_LINE,
SemanticMapLayer.CROSSWALK,
SemanticMapLayer.INTERSECTION,
],
)
assert len(map_objects[SemanticMapLayer.LANE]) == scene["xtr"]["expected_num_lanes"]
assert len(map_objects[SemanticMapLayer.LANE_CONNECTOR]) == scene["xtr"]["expected_num_lane_connectors"]
assert len(map_objects[SemanticMapLayer.ROADBLOCK]) == scene["xtr"]["expected_num_roadblocks"]
assert len(map_objects[SemanticMapLayer.ROADBLOCK_CONNECTOR]) == scene["xtr"]["expected_num_roadblock_connectors"]
assert len(map_objects[SemanticMapLayer.STOP_LINE]) == scene["xtr"]["expected_num_stop_lines"]
assert len(map_objects[SemanticMapLayer.CROSSWALK]) == scene["xtr"]["expected_num_cross_walks"]
assert len(map_objects[SemanticMapLayer.INTERSECTION]) == scene["xtr"]["expected_num_intersections"]
for layer, map_objects in map_objects.items():
add_map_objects_to_scene(scene, map_objects, layer)
@nuplan_test()
def test_unsupported_neighbor_map_objects() -> None:
"""
    Test that an error is raised if an unsupported layer is queried.
"""
nuplan_map = map_factory.build_map_from_name("us-nv-las-vegas-strip")
with pytest.raises(AssertionError):
nuplan_map.get_proximal_map_objects(
Point2D(0, 0),
15,
[
SemanticMapLayer.LANE,
SemanticMapLayer.LANE_CONNECTOR,
SemanticMapLayer.ROADBLOCK,
SemanticMapLayer.ROADBLOCK_CONNECTOR,
SemanticMapLayer.STOP_LINE,
SemanticMapLayer.CROSSWALK,
SemanticMapLayer.INTERSECTION,
SemanticMapLayer.TRAFFIC_LIGHT,
],
)
@nuplan_test()
def test_get_available_map_objects() -> None:
"""
Test getting available map objects for all SemanticMapLayers.
"""
nuplan_map = map_factory.build_map_from_name("us-nv-las-vegas-strip")
assert set(nuplan_map.get_available_map_objects()) == {
SemanticMapLayer.LANE,
SemanticMapLayer.LANE_CONNECTOR,
SemanticMapLayer.ROADBLOCK,
SemanticMapLayer.ROADBLOCK_CONNECTOR,
SemanticMapLayer.STOP_LINE,
SemanticMapLayer.CROSSWALK,
SemanticMapLayer.INTERSECTION,
SemanticMapLayer.WALKWAYS,
SemanticMapLayer.CARPARK_AREA,
SemanticMapLayer.PUDO,
}
if __name__ == "__main__":
raise SystemExit(pytest.main([__file__], plugins=[NUPLAN_TEST_PLUGIN]))
| 35.531447
| 118
| 0.695991
|
4305b3c230dca277292bd605d994cd3fae7cfcc9
| 286
|
py
|
Python
|
codingame/practice/15_temperatures.py
|
thanosa/coding-challenges
|
a10b0de51da076a4bcc798b4a3d5a08e29c5af01
|
[
"MIT"
] | null | null | null |
codingame/practice/15_temperatures.py
|
thanosa/coding-challenges
|
a10b0de51da076a4bcc798b4a3d5a08e29c5af01
|
[
"MIT"
] | null | null | null |
codingame/practice/15_temperatures.py
|
thanosa/coding-challenges
|
a10b0de51da076a4bcc798b4a3d5a08e29c5af01
|
[
"MIT"
] | null | null | null |
'''
https://www.codingame.com/training/easy/temperatures
'''
n = int(input())
b = 999999999
for i in input().split():
t = int(i)
if abs(t) < abs(b):
b = t
elif abs(t) == abs(b):
b = max(t, b)
if b == 999999999:
b = 0
print(b)
| 13
| 53
| 0.468531
|
2a811f83f3af87ef39d8dbe576a3120b9b9ec07d
| 1,355
|
py
|
Python
|
pl_examples/basic_examples/gpu_template.py
|
cmpute/pytorch-lightning
|
695e0514f8e60a88f49786c33311f223be2e7357
|
[
"Apache-2.0"
] | 1
|
2020-09-18T04:30:37.000Z
|
2020-09-18T04:30:37.000Z
|
pl_examples/basic_examples/gpu_template.py
|
cmpute/pytorch-lightning
|
695e0514f8e60a88f49786c33311f223be2e7357
|
[
"Apache-2.0"
] | null | null | null |
pl_examples/basic_examples/gpu_template.py
|
cmpute/pytorch-lightning
|
695e0514f8e60a88f49786c33311f223be2e7357
|
[
"Apache-2.0"
] | null | null | null |
"""
Runs a model on a single node across multiple gpus.
"""
import os
from argparse import ArgumentParser
from pytorch_lightning import Trainer, seed_everything
from pl_examples.models.lightning_template import LightningTemplateModel
seed_everything(234)
def main(args):
"""
    Main training routine specific to this project
    :param args: parsed command-line arguments
"""
# ------------------------
# 1 INIT LIGHTNING MODEL
# ------------------------
model = LightningTemplateModel(**vars(args))
# ------------------------
# 2 INIT TRAINER
# ------------------------
trainer = Trainer.from_argparse_args(args)
# ------------------------
# 3 START TRAINING
# ------------------------
trainer.fit(model)
def run_cli():
# ------------------------
# TRAINING ARGUMENTS
# ------------------------
# these are project-wide arguments
root_dir = os.path.dirname(os.path.realpath(__file__))
parent_parser = ArgumentParser(add_help=False)
# each LightningModule defines arguments relevant to it
parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir)
parser = Trainer.add_argparse_args(parser)
args = parser.parse_args()
# ---------------------
# RUN TRAINING
# ---------------------
main(args)
if __name__ == '__main__':
run_cli()
| 24.196429
| 84
| 0.566052
|
3e1c80b5a8f5755276d67dcd9cf66dc8966c215d
| 5,081
|
py
|
Python
|
ribbon/eureka/discovery_enabled_niws_server_list.py
|
haribo0915/Spring-Cloud-in-Python
|
0bcd7093869c797df14428bf2d1b0a779f96e573
|
[
"Apache-2.0"
] | 5
|
2020-10-06T09:48:23.000Z
|
2020-10-07T13:19:46.000Z
|
ribbon/eureka/discovery_enabled_niws_server_list.py
|
haribo0915/Spring-Cloud-in-Python
|
0bcd7093869c797df14428bf2d1b0a779f96e573
|
[
"Apache-2.0"
] | 5
|
2020-10-05T09:57:01.000Z
|
2020-10-12T19:52:48.000Z
|
ribbon/eureka/discovery_enabled_niws_server_list.py
|
haribo0915/Spring-Cloud-in-Python
|
0bcd7093869c797df14428bf2d1b0a779f96e573
|
[
"Apache-2.0"
] | 8
|
2020-10-05T06:34:49.000Z
|
2020-10-07T13:19:46.000Z
|
# -*- coding: utf-8 -*-
# scip plugin
from spring_cloud.utils import logging
__author__ = "MJ (tsngmj@gmail.com)"
__license__ = "Apache 2.0"
# standard library
from typing import List
# scip plugin
from eureka.client.app_info import InstanceInfo
from ribbon.client.config.client_config import ClientConfig
from ribbon.eureka.discovery_enabled_server import DiscoveryEnabledServer
from ribbon.loadbalancer.server import Server
from ribbon.loadbalancer.server_list import ServerList
class DiscoveryEnabledNIWSServerList(ServerList):
def __init__(self, eureka_client=None, vip_addresses: str = None, client_config: ClientConfig = None):
self.logger = logging.getLogger("ribbon.eureka.DiscoveryEnabledNIWSServerList")
self.__eureka_client = eureka_client
self.__vip_addresses = self.__split_vip_addresses(vip_addresses)
self.__client_config = client_config or self._create_client_config()
self.__client_name = self.__client_config.get_property("CLIENT_NAME")
self.__is_secure = bool(self.__client_config.get_property("IS_SECURE"))
self.__override_port = int(self.__client_config.get_property("PORT"))
self.__prioritize_vip_address_based_servers = bool(
self.__client_config.get_property("PRIORITIZE_VIP_ADDRESS_BASED_SERVERS")
)
self._should_use_ip_address = bool(self.__client_config.get_property("USE_IPADDRESS_FOR_SERVER"))
if bool(self.__client_config.get_property("FORCE_CLIENT_PORT_CONFIGURATION")) and self.__is_secure:
self.should_use_override_port = True
else:
self.should_use_override_port = False
@staticmethod
def _create_client_config():
client_config = ClientConfig()
client_config.load_default_values()
return client_config
@property
def eureka_client(self):
return self.__eureka_client
@eureka_client.setter
def eureka_client(self, eureka_client):
"""
TODO: Will be removed when eureka_client's type resolved.
"""
self.__eureka_client = eureka_client
@property
def vip_addresses(self) -> List[str]:
return self.__vip_addresses
@vip_addresses.setter
def vip_addresses(self, vip_addresses: str):
self.__vip_addresses = self.__split_vip_addresses(vip_addresses)
    @property
    def filter(self):
        """
        TODO: Not implemented for the minimum version.
        """
        return None
    @filter.setter
    def filter(self, value):
        """
        TODO: Not implemented for the minimum version.
        """
        pass
    @property
    def initial_list_of_servers(self) -> List[DiscoveryEnabledServer]:
        self.logger.trace("Initialize servers via the eureka discovery client...")
        return self.obtain_servers_via_discovery()
    @property
    def updated_list_of_servers(self) -> List[DiscoveryEnabledServer]:
        self.logger.trace("Obtaining servers via the eureka discovery client...")
        return self.obtain_servers_via_discovery()
def obtain_servers_via_discovery(self) -> List[Server]:
server_list: List[Server] = []
if self.__vip_addresses and self.__eureka_client:
for vip_address in self.__vip_addresses:
instance_info_list: List[InstanceInfo] = self.__eureka_client.get_instances_by_virtual_host_name(
vip_address, self.__is_secure
)
server_list = self.__extract_server_list(instance_info_list)
if len(server_list) and self.__prioritize_vip_address_based_servers:
break
return server_list
def __extract_server_list(self, instance_info_list):
return [
self._create_server(
self.__set_instance_info_port(instance_info), self.__is_secure, self._should_use_ip_address
)
for instance_info in instance_info_list
if instance_info.status is InstanceInfo.Status.UP
]
def __set_instance_info_port(self, instance_info) -> InstanceInfo:
if self.should_use_override_port:
if self.__is_secure:
instance_info.secure_port = self.__override_port
else:
instance_info.port = self.__override_port
return instance_info
@staticmethod
def __split_vip_addresses(vip_addresses: str):
if vip_addresses:
return vip_addresses.strip().replace(" ", "").split(",")
else:
return []
@staticmethod
def _create_server(
instance_info: InstanceInfo, is_secure: bool, should_use_ip_address: bool
) -> DiscoveryEnabledServer:
server: DiscoveryEnabledServer = DiscoveryEnabledServer(instance_info, is_secure, should_use_ip_address)
return server
def __str__(self):
msg = (
f"DiscoveryEnabledNIWSServerList: \n"
f"ClientName: {self.__client_config}\n"
f"Effective vipAddresses: {self.__vip_addresses}\n"
f"IsSecure: {self.__is_secure}\n"
)
return msg
| 35.284722
| 113
| 0.687069
|
7d366f10479bd25b550df21dd8eaebaa8126c618
| 6,523
|
py
|
Python
|
siteblog/siteblog/settings.py
|
vladislavnet/siteblog
|
f8e0b139c974a78d5de17671768c34d214c025fe
|
[
"Unlicense"
] | null | null | null |
siteblog/siteblog/settings.py
|
vladislavnet/siteblog
|
f8e0b139c974a78d5de17671768c34d214c025fe
|
[
"Unlicense"
] | null | null | null |
siteblog/siteblog/settings.py
|
vladislavnet/siteblog
|
f8e0b139c974a78d5de17671768c34d214c025fe
|
[
"Unlicense"
] | null | null | null |
"""
Django settings for siteblog project.
Generated by 'django-admin startproject' using Django 3.1.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@62hr1i51-8=^m#fwq%&h=s_84j%ybueff2vx4g+ld+5h-nduh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'ckeditor',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ROOT_URLCONF = 'siteblog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'siteblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'siteblog/static'),
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
INTERNAL_IPS = ['127.0.0.1']
CKEDITOR_UPLOAD_PATH = "uploads/"
CKEDITOR_CONFIGS = {
'default': {
'skin': 'moono-lisa',
'toolbar_Basic': [
['Source', '-', 'Bold', 'Italic']
],
'toolbar_YourCustomToolbarConfig': [
{'name': 'document', 'items': ['Source', '-', 'Save', 'NewPage', 'Preview', 'Print', '-', 'Templates']},
{'name': 'clipboard', 'items': ['Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-', 'Undo', 'Redo']},
{'name': 'editing', 'items': ['Find', 'Replace', '-', 'SelectAll']},
{'name': 'forms',
'items': ['Form', 'Checkbox', 'Radio', 'TextField', 'Textarea', 'Select', 'Button', 'ImageButton',
'HiddenField']},
'/',
{'name': 'basicstyles',
'items': ['Bold', 'Italic', 'Underline', 'Strike', 'Subscript', 'Superscript', '-', 'RemoveFormat']},
{'name': 'paragraph',
'items': ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Blockquote', 'CreateDiv', '-',
'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock', '-', 'BidiLtr', 'BidiRtl',
'Language']},
{'name': 'links', 'items': ['Link', 'Unlink', 'Anchor']},
{'name': 'insert',
'items': ['Image', 'Flash', 'Table', 'HorizontalRule', 'Smiley', 'SpecialChar', 'PageBreak', 'Iframe']},
'/',
{'name': 'styles', 'items': ['Styles', 'Format', 'Font', 'FontSize']},
{'name': 'colors', 'items': ['TextColor', 'BGColor']},
{'name': 'tools', 'items': ['Maximize', 'ShowBlocks']},
{'name': 'about', 'items': ['About']},
'/', # put this to force next toolbar on new line
{'name': 'yourcustomtools', 'items': [
# put the name of your editor.ui.addButton here
'Preview',
'Maximize',
]},
],
'toolbar': 'YourCustomToolbarConfig', # put selected toolbar config here
# 'toolbarGroups': [{ 'name': 'document', 'groups': [ 'mode', 'document', 'doctools' ] }],
# 'height': 291,
# 'width': '100%',
# 'filebrowserWindowHeight': 725,
# 'filebrowserWindowWidth': 940,
# 'toolbarCanCollapse': True,
# 'mathJaxLib': '//cdn.mathjax.org/mathjax/2.2-latest/MathJax.js?config=TeX-AMS_HTML',
'tabSpaces': 4,
'extraPlugins': ','.join([
'uploadimage', # the upload image feature
# your extra plugins here
'div',
'autolink',
'autoembed',
'embedsemantic',
'autogrow',
# 'devtools',
'widget',
'lineutils',
'clipboard',
'dialog',
'dialogui',
'elementspath'
]),
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': os.path.join(BASE_DIR, 'django_cache'),
}
}
| 30.914692
| 120
| 0.591906
|
ff2ceb747ca71d64418c2e8493b9663f3437dd91
| 4,243
|
py
|
Python
|
odoo-13.0/addons/website_sale_delivery/models/sale_order.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/addons/website_sale_delivery/models/sale_order.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/addons/website_sale_delivery/models/sale_order.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from odoo import api, fields, models
_logger = logging.getLogger(__name__)
class SaleOrder(models.Model):
_inherit = 'sale.order'
amount_delivery = fields.Monetary(
compute='_compute_amount_delivery',
string='Delivery Amount',
help="The amount without tax.", store=True, tracking=True)
def _compute_website_order_line(self):
super(SaleOrder, self)._compute_website_order_line()
for order in self:
order.website_order_line = order.website_order_line.filtered(lambda l: not l.is_delivery)
@api.depends('order_line.price_unit', 'order_line.tax_id', 'order_line.discount', 'order_line.product_uom_qty')
def _compute_amount_delivery(self):
for order in self:
if self.env.user.has_group('account.group_show_line_subtotals_tax_excluded'):
order.amount_delivery = sum(order.order_line.filtered('is_delivery').mapped('price_subtotal'))
else:
order.amount_delivery = sum(order.order_line.filtered('is_delivery').mapped('price_total'))
def _check_carrier_quotation(self, force_carrier_id=None):
self.ensure_one()
DeliveryCarrier = self.env['delivery.carrier']
if self.only_services:
self.write({'carrier_id': None})
self._remove_delivery_line()
return True
else:
# attempt to use partner's preferred carrier
if not force_carrier_id and self.partner_shipping_id.property_delivery_carrier_id:
force_carrier_id = self.partner_shipping_id.property_delivery_carrier_id.id
carrier = force_carrier_id and DeliveryCarrier.browse(force_carrier_id) or self.carrier_id
available_carriers = self._get_delivery_methods()
if carrier:
if carrier not in available_carriers:
carrier = DeliveryCarrier
else:
                    # set the forced carrier at the beginning of the list to be verified first below
available_carriers -= carrier
available_carriers = carrier + available_carriers
if force_carrier_id or not carrier or carrier not in available_carriers:
for delivery in available_carriers:
verified_carrier = delivery._match_address(self.partner_shipping_id)
if verified_carrier:
carrier = delivery
break
self.write({'carrier_id': carrier.id})
self._remove_delivery_line()
if carrier:
res = carrier.rate_shipment(self)
if res.get('success'):
self.set_delivery_line(carrier, res['price'])
self.delivery_rating_success = True
self.delivery_message = res['warning_message']
else:
self.set_delivery_line(carrier, 0.0)
self.delivery_rating_success = False
self.delivery_message = res['error_message']
return bool(carrier)
def _get_delivery_methods(self):
address = self.partner_shipping_id
# searching on website_published will also search for available website (_search method on computed field)
return self.env['delivery.carrier'].sudo().search([('website_published', '=', True)]).available_carriers(address)
def _cart_update(self, product_id=None, line_id=None, add_qty=0, set_qty=0, **kwargs):
""" Override to update carrier quotation if quantity changed """
self._remove_delivery_line()
        # When you update a cart, it is not enough to remove the "delivery cost" line
# The carrier might also be invalid, eg: if you bought things that are too heavy
# -> this may cause a bug if you go to the checkout screen, choose a carrier,
# then update your cart (the cart becomes uneditable)
self.write({'carrier_id': False})
values = super(SaleOrder, self)._cart_update(product_id, line_id, add_qty, set_qty, **kwargs)
return values
| 45.623656
| 121
| 0.64412
|
8a0493ac922e49cea2a3731cd878aa4ecd143e37
| 544
|
py
|
Python
|
tests/macros/test_tag_macros.py
|
woodrush/hy
|
d9a5acbcc93114031c70fd7ea497e4e59c868e25
|
[
"MIT"
] | 4
|
2017-08-09T01:31:56.000Z
|
2022-01-17T01:11:23.000Z
|
tests/macros/test_tag_macros.py
|
woodrush/hy
|
d9a5acbcc93114031c70fd7ea497e4e59c868e25
|
[
"MIT"
] | null | null | null |
tests/macros/test_tag_macros.py
|
woodrush/hy
|
d9a5acbcc93114031c70fd7ea497e4e59c868e25
|
[
"MIT"
] | null | null | null |
# Copyright 2017 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
from hy.macros import macroexpand
from hy.compiler import HyTypeError, HyASTCompiler
from hy.lex import tokenize
def test_tag_macro_error():
"""Check if we get correct error with wrong dispatch character"""
try:
macroexpand(tokenize("(dispatch_tag_macro '- '())")[0],
HyASTCompiler(__name__))
except HyTypeError as e:
assert "with the character `-`" in str(e)
| 32
| 74
| 0.698529
|
2d05d3b9a7428af870534a96c9e8d64aab701b5f
| 8,037
|
py
|
Python
|
examples/rsa/search_inference.py
|
fluffybird2323/pyro
|
9e74e499dbda76c28f12528235dac25bd17f0b1b
|
[
"MIT"
] | 2
|
2019-01-26T01:53:31.000Z
|
2020-02-26T17:39:17.000Z
|
examples/rsa/search_inference.py
|
fluffybird2323/pyro
|
9e74e499dbda76c28f12528235dac25bd17f0b1b
|
[
"MIT"
] | null | null | null |
examples/rsa/search_inference.py
|
fluffybird2323/pyro
|
9e74e499dbda76c28f12528235dac25bd17f0b1b
|
[
"MIT"
] | 1
|
2019-02-06T14:39:57.000Z
|
2019-02-06T14:39:57.000Z
|
"""
Inference algorithms and utilities used in the RSA example models.
Adapted from: http://dippl.org/chapters/03-enumeration.html
"""
from __future__ import absolute_import, division, print_function
import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.infer.abstract_infer import TracePosterior
from pyro.poutine.runtime import NonlocalExit
import six
from six.moves import queue
import collections
if six.PY3:
import functools
else:
import functools32 as functools
def memoize(fn=None, **kwargs):
if fn is None:
return lambda _fn: memoize(_fn, **kwargs)
return functools.lru_cache(**kwargs)(fn)
def factor(name, value):
"""
Like factor in webPPL, adds a scalar weight to the log-probability of the trace
"""
value = value if torch.is_tensor(value) else torch.tensor(value)
d = dist.Bernoulli(logits=value)
pyro.sample(name, d, obs=torch.ones(value.size()))
class HashingMarginal(dist.Distribution):
"""
:param trace_dist: a TracePosterior instance representing a Monte Carlo posterior
Marginal histogram distribution.
Turns a TracePosterior object into a Distribution
over the return values of the TracePosterior's model.
"""
def __init__(self, trace_dist, sites=None):
assert isinstance(trace_dist, TracePosterior), \
"trace_dist must be trace posterior distribution object"
if sites is None:
sites = "_RETURN"
assert isinstance(sites, (str, list)), \
"sites must be either '_RETURN' or list"
self.sites = sites
super(HashingMarginal, self).__init__()
self.trace_dist = trace_dist
has_enumerate_support = True
@memoize(maxsize=10)
def _dist_and_values(self):
# XXX currently this whole object is very inefficient
values_map, logits = collections.OrderedDict(), collections.OrderedDict()
for tr, logit in zip(self.trace_dist.exec_traces,
self.trace_dist.log_weights):
if isinstance(self.sites, str):
value = tr.nodes[self.sites]["value"]
else:
value = {site: tr.nodes[site]["value"] for site in self.sites}
if not torch.is_tensor(logit):
logit = torch.tensor(logit)
if torch.is_tensor(value):
value_hash = hash(value.cpu().contiguous().numpy().tobytes())
elif isinstance(value, dict):
value_hash = hash(self._dict_to_tuple(value))
else:
value_hash = hash(value)
if value_hash in logits:
# Value has already been seen.
logits[value_hash] = dist.util.logsumexp(torch.stack([logits[value_hash], logit]), dim=-1)
else:
logits[value_hash] = logit
values_map[value_hash] = value
logits = torch.stack(list(logits.values())).contiguous().view(-1)
logits = logits - dist.util.logsumexp(logits, dim=-1)
d = dist.Categorical(logits=logits)
return d, values_map
def sample(self):
d, values_map = self._dist_and_values()
ix = d.sample()
return list(values_map.values())[ix]
def log_prob(self, val):
d, values_map = self._dist_and_values()
if torch.is_tensor(val):
value_hash = hash(val.cpu().contiguous().numpy().tobytes())
elif isinstance(val, dict):
value_hash = hash(self._dict_to_tuple(val))
else:
value_hash = hash(val)
return d.log_prob(torch.tensor([list(values_map.keys()).index(value_hash)]))
def enumerate_support(self):
d, values_map = self._dist_and_values()
return list(values_map.values())[:]
def _dict_to_tuple(self, d):
"""
        Recursively converts a dictionary to a tuple of key-value tuples.
        Only intended for use as a helper function inside HashingMarginal!
        May break when keys can't be sorted, but that is not an expected use-case.
"""
if isinstance(d, dict):
return tuple([(k, self._dict_to_tuple(d[k])) for k in sorted(d.keys())])
else:
return d
def _weighted_mean(self, value, dim=0):
weights = self._dist_and_values()[0].logits
for _ in range(value.dim() - 1):
weights = weights.unsqueeze(-1)
max_val = weights.max(dim)[0]
return max_val.exp() * (value * (weights - max_val.unsqueeze(-1)).exp()).sum(dim=dim)
@property
def mean(self):
samples = torch.stack(list(self._dist_and_values()[1].values()))
return self._weighted_mean(samples) / self._weighted_mean(samples.new_tensor([1.]))
@property
def variance(self):
samples = torch.stack(list(self._dist_and_values()[1].values()))
deviation_squared = torch.pow(samples - self.mean, 2)
return self._weighted_mean(deviation_squared) / self._weighted_mean(samples.new_tensor([1.]))
########################
# Exact Search inference
########################
class Search(TracePosterior):
"""
Exact inference by enumerating over all possible executions
"""
def __init__(self, model, max_tries=int(1e6), **kwargs):
self.model = model
self.max_tries = max_tries
super(Search, self).__init__(**kwargs)
def _traces(self, *args, **kwargs):
q = queue.Queue()
q.put(poutine.Trace())
p = poutine.trace(
poutine.queue(self.model, queue=q, max_tries=self.max_tries))
while not q.empty():
tr = p.get_trace(*args, **kwargs)
yield tr, tr.log_prob_sum()
###############################################
# Best-first Search Inference
###############################################
def pqueue(fn, queue):
def sample_escape(tr, site):
return (site["name"] not in tr) and \
(site["type"] == "sample") and \
(not site["is_observed"])
def _fn(*args, **kwargs):
for i in range(int(1e6)):
assert not queue.empty(), \
"trying to get() from an empty queue will deadlock"
priority, next_trace = queue.get()
try:
ftr = poutine.trace(poutine.escape(poutine.replay(fn, next_trace),
functools.partial(sample_escape,
next_trace)))
return ftr(*args, **kwargs)
except NonlocalExit as site_container:
site_container.reset_stack()
for tr in poutine.util.enum_extend(ftr.trace.copy(),
site_container.site):
# add a little bit of noise to the priority to break ties...
queue.put((tr.log_prob_sum().item() - torch.rand(1).item() * 1e-2, tr))
raise ValueError("max tries ({}) exceeded".format(str(1e6)))
return _fn
class BestFirstSearch(TracePosterior):
"""
Inference by enumerating executions ordered by their probabilities.
Exact (and results equivalent to Search) if all executions are enumerated.
"""
def __init__(self, model, num_samples=None, **kwargs):
if num_samples is None:
num_samples = 100
self.num_samples = num_samples
self.model = model
super(BestFirstSearch, self).__init__(**kwargs)
def _traces(self, *args, **kwargs):
q = queue.PriorityQueue()
# add a little bit of noise to the priority to break ties...
q.put((torch.zeros(1).item() - torch.rand(1).item() * 1e-2, poutine.Trace()))
q_fn = pqueue(self.model, queue=q)
for i in range(self.num_samples):
if q.empty():
# num_samples was too large!
break
tr = poutine.trace(q_fn).get_trace(*args, **kwargs) # XXX should block
yield tr, tr.log_prob_sum()
| 35.25
| 106
| 0.596491
|
64dcf1dfc12f629488ffc1f1a1160660890b59ee
| 7,999
|
py
|
Python
|
distributed/diskutils.py
|
hmaarrfk/distributed
|
6caa30896e66501483416812d44c861da75ceab6
|
[
"BSD-3-Clause"
] | null | null | null |
distributed/diskutils.py
|
hmaarrfk/distributed
|
6caa30896e66501483416812d44c861da75ceab6
|
[
"BSD-3-Clause"
] | null | null | null |
distributed/diskutils.py
|
hmaarrfk/distributed
|
6caa30896e66501483416812d44c861da75ceab6
|
[
"BSD-3-Clause"
] | null | null | null |
import errno
import glob
import logging
import os
import shutil
import stat
import tempfile
import weakref
import dask
from . import locket
logger = logging.getLogger(__name__)
DIR_LOCK_EXT = ".dirlock"
def is_locking_enabled():
return dask.config.get("distributed.worker.use-file-locking")
def safe_unlink(path):
try:
os.unlink(path)
except EnvironmentError as e:
# Perhaps it was removed by someone else?
if e.errno != errno.ENOENT:
logger.error("Failed to remove %r", str(e))
class WorkDir(object):
"""
A temporary work directory inside a WorkSpace.
"""
def __init__(self, workspace, name=None, prefix=None):
assert name is None or prefix is None
if name is None:
self.dir_path = tempfile.mkdtemp(prefix=prefix, dir=workspace.base_dir)
else:
self.dir_path = os.path.join(workspace.base_dir, name)
os.mkdir(self.dir_path) # it shouldn't already exist
if is_locking_enabled():
try:
self._lock_path = os.path.join(self.dir_path + DIR_LOCK_EXT)
assert not os.path.exists(self._lock_path)
logger.debug("Locking %r...", self._lock_path)
# Avoid a race condition before locking the file
# by taking the global lock
try:
with workspace._global_lock():
self._lock_file = locket.lock_file(self._lock_path)
self._lock_file.acquire()
except OSError as e:
logger.exception(
"Could not acquire workspace lock on "
"path: %s ."
"Continuing without lock. "
"This may result in workspaces not being "
"cleaned up",
self._lock_path,
exc_info=True,
)
self._lock_file = None
except Exception:
shutil.rmtree(self.dir_path, ignore_errors=True)
raise
workspace._known_locks.add(self._lock_path)
self._finalizer = weakref.finalize(
self,
self._finalize,
workspace,
self._lock_path,
self._lock_file,
self.dir_path,
)
else:
self._finalizer = weakref.finalize(
self, self._finalize, workspace, None, None, self.dir_path
)
def release(self):
"""
Dispose of this directory.
"""
self._finalizer()
@classmethod
def _finalize(cls, workspace, lock_path, lock_file, dir_path):
try:
workspace._purge_directory(dir_path)
finally:
if lock_file is not None:
lock_file.release()
if lock_path is not None:
workspace._known_locks.remove(lock_path)
safe_unlink(lock_path)
class WorkSpace(object):
"""
An on-disk workspace that tracks disposable work directories inside it.
If a process crashes or another event left stale directories behind,
this will be detected and the directories purged.
"""
# Keep track of all locks known to this process, to avoid several
# WorkSpaces to step on each other's toes
_known_locks = set()
def __init__(self, base_dir):
self.base_dir = os.path.abspath(base_dir)
self._init_workspace()
self._global_lock_path = os.path.join(self.base_dir, "global.lock")
self._purge_lock_path = os.path.join(self.base_dir, "purge.lock")
def _init_workspace(self):
try:
os.mkdir(self.base_dir)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
def _global_lock(self, **kwargs):
return locket.lock_file(self._global_lock_path, **kwargs)
def _purge_lock(self, **kwargs):
return locket.lock_file(self._purge_lock_path, **kwargs)
def _purge_leftovers(self):
if not is_locking_enabled():
return []
# List candidates with the global lock taken, to avoid purging
# a lock file that was just created but not yet locked
# (see WorkDir.__init__)
lock = self._global_lock(timeout=0)
try:
lock.acquire()
except locket.LockError:
# No need to waste time here if someone else is busy doing
# something on this workspace
return []
else:
try:
candidates = list(self._list_unknown_locks())
finally:
lock.release()
# No need to hold the global lock here, especially as purging
# can take time. Instead take the purge lock to avoid two
# processes purging at once.
purged = []
lock = self._purge_lock(timeout=0)
try:
lock.acquire()
except locket.LockError:
# No need for two processes to purge one after another
pass
else:
try:
for path in candidates:
if self._check_lock_or_purge(path):
purged.append(path)
finally:
lock.release()
return purged
def _list_unknown_locks(self):
for p in glob.glob(os.path.join(self.base_dir, "*" + DIR_LOCK_EXT)):
try:
st = os.stat(p)
except EnvironmentError:
# May have been removed in the meantime
pass
else:
# XXX restrict to files owned by current user?
if stat.S_ISREG(st.st_mode):
yield p
def _purge_directory(self, dir_path):
shutil.rmtree(dir_path, onerror=self._on_remove_error)
def _check_lock_or_purge(self, lock_path):
"""
Try locking the given path, if it fails it's in use,
otherwise the corresponding directory is deleted.
Return True if the lock was stale.
"""
assert lock_path.endswith(DIR_LOCK_EXT)
if lock_path in self._known_locks:
# Avoid touching a lock that we know is already taken
return False
logger.debug("Checking lock file %r...", lock_path)
lock = locket.lock_file(lock_path, timeout=0)
try:
lock.acquire()
except locket.LockError:
# Lock file still in use, ignore
return False
try:
# Lock file is stale, therefore purge corresponding directory
dir_path = lock_path[: -len(DIR_LOCK_EXT)]
if os.path.exists(dir_path):
logger.info("Found stale lock file and directory %r, purging", dir_path)
self._purge_directory(dir_path)
finally:
lock.release()
# Clean up lock file after we released it
safe_unlink(lock_path)
return True
def _on_remove_error(self, func, path, exc_info):
typ, exc, tb = exc_info
logger.error("Failed to remove %r (failed in %r): %s", path, func, str(exc))
def new_work_dir(self, **kwargs):
"""
Create and return a new WorkDir in this WorkSpace.
Either the *prefix* or *name* parameter should be given
(*prefix* is preferred as it avoids potential collisions)
Parameters
----------
prefix: str (optional)
The prefix of the temporary subdirectory name for the workdir
name: str (optional)
The subdirectory name for the workdir
"""
try:
self._purge_leftovers()
except OSError:
            logger.error(
                "Failed to clean up lingering worker directories "
                "in path: %s ",
                self.base_dir,
                exc_info=True,
            )
return WorkDir(self, **kwargs)
| 32.782787
| 88
| 0.566821
|
e00c3d9b73c517defcaf3728489be68f62c670a4
| 566
|
py
|
Python
|
backend/src/gql/query/get_user.py
|
spiritutumduo/spiritumDuo
|
987785906cd504f46ccebe3bbfe0e81cbf02bf7c
|
[
"MIT"
] | 1
|
2022-03-11T14:07:16.000Z
|
2022-03-11T14:07:16.000Z
|
backend/src/gql/query/get_user.py
|
spiritutumduo/spiritumDuo
|
987785906cd504f46ccebe3bbfe0e81cbf02bf7c
|
[
"MIT"
] | 3
|
2022-02-25T22:46:46.000Z
|
2022-03-30T08:19:41.000Z
|
backend/src/gql/query/get_user.py
|
spiritutumduo/spiritumDuo
|
987785906cd504f46ccebe3bbfe0e81cbf02bf7c
|
[
"MIT"
] | 1
|
2022-03-31T14:35:51.000Z
|
2022-03-31T14:35:51.000Z
|
from .query_type import query
from dataloaders import UserByIdLoader, UserByUsernameLoader
from authentication.authentication import needsAuthorization
from SdTypes import Permissions
@query.field("getUser")
@needsAuthorization([Permissions.USER_READ])
async def resolve_get_user(
obj=None,
info=None,
id: int = None,
username: str = None
):
if id:
return await UserByIdLoader.load_from_id(info.context, id)
elif username:
return await UserByUsernameLoader.load_from_id(info.context, username)
else:
return None
| 26.952381
| 78
| 0.749117
|
d291d1d151be262e4188105f73bd16f48b353d81
| 2,031
|
py
|
Python
|
docs/conf.py
|
kacperstyslo/subnet-info
|
8899203f7d1c3ac858fa923aee6d0b1b1f59cbc7
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
kacperstyslo/subnet-info
|
8899203f7d1c3ac858fa923aee6d0b1b1f59cbc7
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
kacperstyslo/subnet-info
|
8899203f7d1c3ac858fa923aee6d0b1b1f59cbc7
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
import subnet_info
# -- Project information -----------------------------------------------------
project = "Subnet Info"
copyright = "2022, Kacper Styslo"
author = "Kacper Styslo"
# The full version, including alpha/beta/rc tags
release = "1.1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| 33.295082
| 79
| 0.666174
|
242acab459481f6b3a5313494374afd52670a33a
| 7,763
|
py
|
Python
|
test/test_vlib.py
|
halvors/vpp
|
fdc17b1ae7743bc37d4e08467d0295d6f277ec12
|
[
"Apache-2.0"
] | null | null | null |
test/test_vlib.py
|
halvors/vpp
|
fdc17b1ae7743bc37d4e08467d0295d6f277ec12
|
[
"Apache-2.0"
] | null | null | null |
test/test_vlib.py
|
halvors/vpp
|
fdc17b1ae7743bc37d4e08467d0295d6f277ec12
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import unittest
import pexpect
import time
import signal
from config import config
from framework import VppTestCase, VppTestRunner
from vpp_ip_route import VppIpTable, VppIpRoute, VppRoutePath
@unittest.skipUnless(config.gcov, "part of code coverage tests")
class TestVlib(VppTestCase):
""" Vlib Unit Test Cases """
vpp_worker_count = 1
@classmethod
def setUpClass(cls):
super(TestVlib, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestVlib, cls).tearDownClass()
def setUp(self):
super(TestVlib, self).setUp()
def tearDown(self):
super(TestVlib, self).tearDown()
def test_vlib_main_unittest(self):
""" Vlib main.c Code Coverage Test """
cmds = ["loopback create",
"packet-generator new {\n"
" name vlib\n"
" limit 15\n"
" size 128-128\n"
" interface loop0\n"
" node ethernet-input\n"
" data {\n"
" IP6: 00:d0:2d:5e:86:85 -> 00:0d:ea:d0:00:00\n"
" ICMP: db00::1 -> db00::2\n"
" incrementing 30\n"
" }\n"
"}\n",
"event-logger trace dispatch",
"event-logger stop",
"event-logger clear",
"event-logger resize 102400",
"event-logger restart",
"pcap dispatch trace on max 100 buffer-trace pg-input 15",
"pa en",
"show event-log 100 all",
"event-log save",
"event-log save foo",
"pcap dispatch trace",
"pcap dispatch trace status",
"pcap dispatch trace off",
"show vlib frame-allocation",
]
for cmd in cmds:
r = self.vapi.cli_return_response(cmd)
if r.retval != 0:
if hasattr(r, 'reply'):
self.logger.info(cmd + " FAIL reply " + r.reply)
else:
self.logger.info(cmd + " FAIL retval " + str(r.retval))
def test_vlib_node_cli_unittest(self):
""" Vlib node_cli.c Code Coverage Test """
cmds = ["loopback create",
"packet-generator new {\n"
" name vlib\n"
" limit 15\n"
" size 128-128\n"
" interface loop0\n"
" node ethernet-input\n"
" data {\n"
" IP6: 00:d0:2d:5e:86:85 -> 00:0d:ea:d0:00:00\n"
" ICMP: db00::1 -> db00::2\n"
" incrementing 30\n"
" }\n"
"}\n",
"show vlib graph",
"show vlib graph ethernet-input",
"show vlib graphviz",
"show vlib graphviz graphviz.dot",
"pa en",
"show runtime ethernet-input",
"show runtime brief verbose max summary",
"clear runtime",
"show node index 1",
"show node ethernet-input",
"show node pg-input",
"set node function",
"set node function no-such-node",
"set node function cdp-input default",
"set node function ethernet-input default",
"set node function ethernet-input bozo",
"set node function ethernet-input",
"show \t",
]
for cmd in cmds:
r = self.vapi.cli_return_response(cmd)
if r.retval != 0:
if hasattr(r, 'reply'):
self.logger.info(cmd + " FAIL reply " + r.reply)
else:
self.logger.info(cmd + " FAIL retval " + str(r.retval))
def test_vlib_buffer_c_unittest(self):
""" Vlib buffer.c Code Coverage Test """
cmds = ["loopback create",
"packet-generator new {\n"
" name vlib\n"
" limit 15\n"
" size 128-128\n"
" interface loop0\n"
" node ethernet-input\n"
" data {\n"
" IP6: 00:d0:2d:5e:86:85 -> 00:0d:ea:d0:00:00\n"
" ICMP: db00::1 -> db00::2\n"
" incrementing 30\n"
" }\n"
"}\n",
"event-logger trace",
"event-logger trace enable",
"event-logger trace api cli barrier",
"pa en",
"show interface bogus",
"event-logger trace disable api cli barrier",
"event-logger trace circuit-node ethernet-input",
"event-logger trace circuit-node ethernet-input disable",
"clear interfaces",
"test vlib",
"test vlib2",
"show memory api-segment stats-segment main-heap verbose",
"leak-check { show memory }",
"show cpu",
"memory-trace main-heap",
"memory-trace main-heap api-segment stats-segment",
"leak-check { show version }",
"show version ?",
"comment { show version }",
"uncomment { show version }",
"show memory main-heap",
"show memory bogus",
"choices",
"test heap-validate",
"memory-trace main-heap disable",
"show buffers",
"show eve",
"show help",
"show ip ",
]
for cmd in cmds:
r = self.vapi.cli_return_response(cmd)
if r.retval != 0:
if hasattr(r, 'reply'):
self.logger.info(cmd + " FAIL reply " + r.reply)
else:
self.logger.info(cmd + " FAIL retval " + str(r.retval))
def test_vlib_format_unittest(self):
""" Vlib format.c Code Coverage Test """
cmds = ["loopback create",
"classify filter pcap mask l2 proto match l2 proto 0x86dd",
"classify filter pcap del",
"test format-vlib",
]
for cmd in cmds:
r = self.vapi.cli_return_response(cmd)
if r.retval != 0:
if hasattr(r, 'reply'):
self.logger.info(cmd + " FAIL reply " + r.reply)
else:
self.logger.info(cmd + " FAIL retval " + str(r.retval))
    def test_api_segment_unittest(self):
""" Private Binary API Segment Test (takes 70 seconds) """
vat_path = config.vpp + '_api_test'
vat = pexpect.spawn(vat_path, ['socket-name',
self.get_api_sock_path()])
vat.expect("vat# ", timeout=10)
vat.sendline('sock_init_shm')
vat.expect("vat# ", timeout=10)
vat.sendline('sh api cli')
vat.kill(signal.SIGKILL)
vat.wait()
self.logger.info("vat terminated, 70 second wait for the Reaper")
time.sleep(70)
self.logger.info("Reaper should be complete...")
def test_pool(self):
""" Fixed-size Pool Test """
cmds = ["test pool",
]
for cmd in cmds:
r = self.vapi.cli_return_response(cmd)
if r.retval != 0:
if hasattr(r, 'reply'):
self.logger.info(cmd + " FAIL reply " + r.reply)
else:
self.logger.info(cmd + " FAIL retval " + str(r.retval))
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
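# Hedged refactoring sketch (an addition, not upstream code): every test case
# above repeats the same "run CLI commands and log failures" loop, so a helper
# method on TestVlib could factor it out. Only attributes already used above
# (self.vapi.cli_return_response, self.logger) are assumed.
#
#     def _run_cli_commands(self, cmds):
#         """Run each CLI command, logging non-zero retvals without failing."""
#         for cmd in cmds:
#             r = self.vapi.cli_return_response(cmd)
#             if r.retval != 0:
#                 if hasattr(r, 'reply'):
#                     self.logger.info(cmd + " FAIL reply " + r.reply)
#                 else:
#                     self.logger.info(cmd + " FAIL retval " + str(r.retval))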
| 35.447489
| 75
| 0.473786
|
849bf8bffd92dcce816daaa0f8a55c62d57eea49
| 521
|
py
|
Python
|
src/cloudlight/__init__.py
|
joigno/cloudlight
|
8a6510047abd97e0bf3a568322205beb56fa5260
|
[
"BSD-3-Clause"
] | 3
|
2020-08-21T00:18:50.000Z
|
2020-10-21T17:40:47.000Z
|
src/cloudlight/__init__.py
|
joigno/cloudlight
|
8a6510047abd97e0bf3a568322205beb56fa5260
|
[
"BSD-3-Clause"
] | null | null | null |
src/cloudlight/__init__.py
|
joigno/cloudlight
|
8a6510047abd97e0bf3a568322205beb56fa5260
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Created on Mar 26, 2010
@author: jose
'''
from classes.graph import *
from classes.big_graph import *
from classes.big_digraph import *
from nodes.node import *
from nodes.search import *
from nodes.facebook import *
from nodes.twitter import *
from algorithms.plot import *
from algorithms.privacy import *
from algorithms.privacy import *
from algorithms.privacy_attack import *
from bots.iterator import *
from bots.iterator import *
from bots.visitor import *
from bots.builder import *
from bots.bot import *
| 21.708333
| 39
| 0.775432
|
8233e9b2d95d8c01071143aa0854b14dccccbb37
| 7,083
|
py
|
Python
|
voyager/client/models/v1beta1_certificate_details.py
|
voyager-client/python
|
d30de935c9cf30fa9e9e4c90714f3868767f4065
|
[
"Apache-2.0"
] | null | null | null |
voyager/client/models/v1beta1_certificate_details.py
|
voyager-client/python
|
d30de935c9cf30fa9e9e4c90714f3868767f4065
|
[
"Apache-2.0"
] | 1
|
2018-06-24T20:33:11.000Z
|
2018-06-24T20:33:11.000Z
|
voyager/client/models/v1beta1_certificate_details.py
|
voyager-client/python
|
d30de935c9cf30fa9e9e4c90714f3868767f4065
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Voyager
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v7.1.1
Contact: hello@appscode.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1CertificateDetails(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_ref': 'str',
'cert_stable_url': 'str',
'cert_url': 'str',
'not_after': 'datetime',
'not_before': 'datetime',
'serial_number': 'str'
}
attribute_map = {
'account_ref': 'accountRef',
'cert_stable_url': 'certStableURL',
'cert_url': 'certURL',
'not_after': 'notAfter',
'not_before': 'notBefore',
'serial_number': 'serialNumber'
}
def __init__(self, account_ref=None, cert_stable_url=None, cert_url=None, not_after=None, not_before=None, serial_number=None):
"""
V1beta1CertificateDetails - a model defined in Swagger
"""
self._account_ref = None
self._cert_stable_url = None
self._cert_url = None
self._not_after = None
self._not_before = None
self._serial_number = None
self.discriminator = None
if account_ref is not None:
self.account_ref = account_ref
self.cert_stable_url = cert_stable_url
self.cert_url = cert_url
if not_after is not None:
self.not_after = not_after
if not_before is not None:
self.not_before = not_before
if serial_number is not None:
self.serial_number = serial_number
@property
def account_ref(self):
"""
Gets the account_ref of this V1beta1CertificateDetails.
:return: The account_ref of this V1beta1CertificateDetails.
:rtype: str
"""
return self._account_ref
@account_ref.setter
def account_ref(self, account_ref):
"""
Sets the account_ref of this V1beta1CertificateDetails.
:param account_ref: The account_ref of this V1beta1CertificateDetails.
:type: str
"""
self._account_ref = account_ref
@property
def cert_stable_url(self):
"""
Gets the cert_stable_url of this V1beta1CertificateDetails.
:return: The cert_stable_url of this V1beta1CertificateDetails.
:rtype: str
"""
return self._cert_stable_url
@cert_stable_url.setter
def cert_stable_url(self, cert_stable_url):
"""
Sets the cert_stable_url of this V1beta1CertificateDetails.
:param cert_stable_url: The cert_stable_url of this V1beta1CertificateDetails.
:type: str
"""
if cert_stable_url is None:
raise ValueError("Invalid value for `cert_stable_url`, must not be `None`")
self._cert_stable_url = cert_stable_url
@property
def cert_url(self):
"""
Gets the cert_url of this V1beta1CertificateDetails.
:return: The cert_url of this V1beta1CertificateDetails.
:rtype: str
"""
return self._cert_url
@cert_url.setter
def cert_url(self, cert_url):
"""
Sets the cert_url of this V1beta1CertificateDetails.
:param cert_url: The cert_url of this V1beta1CertificateDetails.
:type: str
"""
if cert_url is None:
raise ValueError("Invalid value for `cert_url`, must not be `None`")
self._cert_url = cert_url
@property
def not_after(self):
"""
Gets the not_after of this V1beta1CertificateDetails.
:return: The not_after of this V1beta1CertificateDetails.
:rtype: datetime
"""
return self._not_after
@not_after.setter
def not_after(self, not_after):
"""
Sets the not_after of this V1beta1CertificateDetails.
:param not_after: The not_after of this V1beta1CertificateDetails.
:type: datetime
"""
self._not_after = not_after
@property
def not_before(self):
"""
Gets the not_before of this V1beta1CertificateDetails.
:return: The not_before of this V1beta1CertificateDetails.
:rtype: datetime
"""
return self._not_before
@not_before.setter
def not_before(self, not_before):
"""
Sets the not_before of this V1beta1CertificateDetails.
:param not_before: The not_before of this V1beta1CertificateDetails.
:type: datetime
"""
self._not_before = not_before
@property
def serial_number(self):
"""
Gets the serial_number of this V1beta1CertificateDetails.
:return: The serial_number of this V1beta1CertificateDetails.
:rtype: str
"""
return self._serial_number
@serial_number.setter
def serial_number(self, serial_number):
"""
Sets the serial_number of this V1beta1CertificateDetails.
:param serial_number: The serial_number of this V1beta1CertificateDetails.
:type: str
"""
self._serial_number = serial_number
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1CertificateDetails):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
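# Hedged usage sketch (an addition, not part of the generated client): build a
# details object and serialize it. The URLs and serial number are illustrative
# placeholders; only the constructor and to_dict() defined above are used.
if __name__ == "__main__":
    import datetime
    _details = V1beta1CertificateDetails(
        cert_stable_url="https://acme.example/cert/stable",  # required field
        cert_url="https://acme.example/cert/123",            # required field
        serial_number="0x1234",
        not_before=datetime.datetime(2018, 1, 1),
        not_after=datetime.datetime(2018, 4, 1),
    )
    print(_details.to_dict())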
| 27.560311
| 131
| 0.599181
|
2d94fc5df32994f8a14bd92135a0a05aae4e1e67
| 2,139
|
py
|
Python
|
actions/build_manylinux.py
|
chryswoods/python_pack_and_doc
|
6d63d4e91dd93abf8f618d75c161ab65f943dadd
|
[
"Apache-2.0"
] | 5
|
2020-09-23T14:04:39.000Z
|
2020-10-02T14:01:16.000Z
|
actions/build_manylinux.py
|
chryswoods/python_pack_and_doc
|
6d63d4e91dd93abf8f618d75c161ab65f943dadd
|
[
"Apache-2.0"
] | null | null | null |
actions/build_manylinux.py
|
chryswoods/python_pack_and_doc
|
6d63d4e91dd93abf8f618d75c161ab65f943dadd
|
[
"Apache-2.0"
] | 1
|
2020-09-25T16:25:24.000Z
|
2020-09-25T16:25:24.000Z
|
import sys
import os
import subprocess
import shlex
import glob
def run_command(cmd, dry=False):
"""Run the passed shell command"""
if dry:
print(f"[DRY-RUN] {cmd}")
return
print(f"[EXECUTE] {cmd}")
try:
args = shlex.split(cmd)
subprocess.run(args).check_returncode()
except Exception as e:
print(f"[IGNORE ERROR] {e}")
sys.exit(0)
def run_docker():
PLAT = "manylinux1_x86_64"
DOCKER_IMAGE = "quay.io/pypa/manylinux1_x86_64"
pwd = os.getcwd()
pyexe = "/opt/python/cp38-cp38/bin/python3.8"
cmd = f"docker run --rm -e PLAT={PLAT} -v {pwd}:/io {DOCKER_IMAGE} " \
f"{pyexe} /io/actions/build_manylinux.py build"
run_command(cmd)
def build_wheels():
pybins = ["/opt/python/cp37-cp37m/bin", "/opt/python/cp38-cp38/bin"]
print(pybins)
PLAT = os.getenv("PLAT")
os.environ["CYTHONIZE"] = "1"
print(PLAT)
old_path = os.getenv("PATH")
old_cwd = os.getcwd()
for pybin in pybins:
print(f"\nBUILDING WHEEL FOR {pybin}\n")
print("Installing dependencies...")
sys.stdout.flush()
run_command(f"{pybin}/pip install -r /io/requirements.txt")
run_command(f"{pybin}/pip install pytest")
print("Building the wheel...")
os.environ["PATH"] = f"{pybin}:{old_path}"
sys.stdout.flush()
os.chdir("/io/")
run_command("mv build build_tmp")
run_command("make")
run_command(
f"{pybin}/python setup.py bdist_wheel --dist-dir /wheelhouse")
run_command("rm -rf build")
run_command("mv build_tmp build")
os.chdir(old_cwd)
os.environ["PATH"] = old_path
wheels = glob.glob("/wheelhouse/pack_and_doc*.whl")
print(wheels)
sys.stdout.flush()
for wheel in wheels:
print(f"\nREPAIRING WHEEL FOR {wheel}\n")
sys.stdout.flush()
run_command(
f"auditwheel repair \"{wheel}\" --plat {PLAT} -w /io/dist/")
if __name__ == "__main__":
try:
if sys.argv[1] == "build":
build_wheels()
except Exception:
run_docker()
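# Hedged usage sketch (assumptions about invocation, not verified): run from
# the repository root so the docker bind mount of the current directory onto
# /io exposes requirements.txt, setup.py and the Makefile.
#
#   python actions/build_manylinux.py         # outside the container: any arg
#                                             # error falls through to run_docker()
#   python actions/build_manylinux.py build   # inside the container: build_wheels()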
| 25.771084
| 74
| 0.591865
|
12892a7394c07e0c73c98f0762038bd53c40fb80
| 2,128
|
py
|
Python
|
equineclinic/src/webapps/web/wsgi.py
|
ProfessionalIT/customers
|
3dbc1989bb3494fb6de7edad67dc59b7b0385ac3
|
[
"MIT"
] | null | null | null |
equineclinic/src/webapps/web/wsgi.py
|
ProfessionalIT/customers
|
3dbc1989bb3494fb6de7edad67dc59b7b0385ac3
|
[
"MIT"
] | 1
|
2015-11-08T11:49:35.000Z
|
2015-11-08T11:49:43.000Z
|
equineclinic/src/webapps/web/wsgi.py
|
ProfessionalIT/customers
|
3dbc1989bb3494fb6de7edad67dc59b7b0385ac3
|
[
"MIT"
] | null | null | null |
"""
WSGI Utilities
(from web.py)
"""
import os, sys
import http
import webapi as web
from utils import listget
from net import validaddr, validip
import httpserver
def runfcgi(func, addr=('localhost', 8000)):
"""Runs a WSGI function as a FastCGI server."""
#import flup.server.fcgi as flups
#return flups.WSGIServer(func, multiplexed=True, bindAddress=addr).run()
import fcgi as flups
return flups.WSGIServer(func, multiplexed=False, bindAddress=addr).run()
def runscgi(func, addr=('localhost', 4000)):
"""Runs a WSGI function as an SCGI server."""
import flup.server.scgi as flups
return flups.WSGIServer(func, bindAddress=addr).run()
def runwsgi(func):
"""
Runs a WSGI-compatible `func` using FCGI, SCGI, or a simple web server,
as appropriate based on context and `sys.argv`.
"""
    if 'SERVER_SOFTWARE' in os.environ: # cgi
os.environ['FCGI_FORCE_CGI'] = 'Y'
    if ('PHP_FCGI_CHILDREN' in os.environ #lighttpd fastcgi
        or 'SERVER_SOFTWARE' in os.environ):
return runfcgi(func, None)
if 'fcgi' in sys.argv or 'fastcgi' in sys.argv:
args = sys.argv[1:]
if 'fastcgi' in args: args.remove('fastcgi')
elif 'fcgi' in args: args.remove('fcgi')
if args:
return runfcgi(func, validaddr(args[0]))
else:
return runfcgi(func, None)
if 'scgi' in sys.argv:
args = sys.argv[1:]
args.remove('scgi')
if args:
return runscgi(func, validaddr(args[0]))
else:
return runscgi(func)
return httpserver.runsimple(func, validip(listget(sys.argv, 1, '')))
def _is_dev_mode():
# quick hack to check if the program is running in dev mode.
    if 'SERVER_SOFTWARE' in os.environ \
        or 'PHP_FCGI_CHILDREN' in os.environ \
or 'fcgi' in sys.argv or 'fastcgi' in sys.argv \
or 'mod_wsgi' in sys.argv:
return False
return True
# When running the builtin-server, enable debug mode if not already set.
web.config.setdefault('debug', _is_dev_mode())
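# Hedged usage sketch (an addition, not part of web.py): serving a minimal
# WSGI callable through runwsgi(). With no fcgi/scgi arguments and no FastCGI
# environment variables set, this falls through to httpserver.runsimple().
#
# def hello_app(environ, start_response):
#     start_response('200 OK', [('Content-Type', 'text/plain')])
#     return ['hello, world\n']
#
# if __name__ == '__main__':
#     runwsgi(hello_app)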
| 30.84058
| 76
| 0.641447
|
16aa68757a7a727bd115d11ed5071f8a7b22b8c5
| 1,542
|
py
|
Python
|
source/database.py
|
FroggedTV/grenouilleDISCORD
|
bf0dd1004ed0c0eb794896e6f48263b647036617
|
[
"MIT"
] | null | null | null |
source/database.py
|
FroggedTV/grenouilleDISCORD
|
bf0dd1004ed0c0eb794896e6f48263b647036617
|
[
"MIT"
] | null | null | null |
source/database.py
|
FroggedTV/grenouilleDISCORD
|
bf0dd1004ed0c0eb794896e6f48263b647036617
|
[
"MIT"
] | null | null | null |
from threading import Lock
import sqlite3
import config
import logging
from time import time as current_time
LOGGER = logging.getLogger(__name__)
class Database(object):
def __init__(self):
LOGGER.debug("Initializing Database object")
self.database_path = config.database_path
self.lock = Lock()
self.connection = sqlite3.connect(self.database_path,
check_same_thread=False)
self.init_table()
def init_table(self):
self.lock.acquire()
c = self.connection.cursor()
query = '''CREATE TABLE IF NOT EXISTS alerts
(
game_id INTEGER,
status TEXT,
time INTEGER
);'''
c.execute(query)
self.connection.commit()
self.lock.release()
def insert_alert(self, game_id, status):
LOGGER.debug("Inserting alert: "+str(game_id)+" "+status)
self.lock.acquire()
c = self.connection.cursor()
alert_time = int(current_time())
c.execute('INSERT INTO alerts VALUES (?, ?, ?)', (game_id, status, alert_time))
self.connection.commit()
self.lock.release()
def is_alert(self, game_id, status):
self.lock.acquire()
c = self.connection.cursor()
c.execute('SELECT game_id FROM alerts WHERE game_id = ? AND status = ?', (game_id, status))
content = c.fetchone()
self.lock.release()
if content:
return True
else:
return False
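# Hedged usage sketch (an addition; assumes config.database_path points at a
# writable sqlite file): record an alert only once per (game_id, status) pair.
if __name__ == "__main__":
    db = Database()
    if not db.is_alert(42, "GAME_START"):
        db.insert_alert(42, "GAME_START")
    print(db.is_alert(42, "GAME_START"))  # True once the alert is stored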
| 30.235294
| 99
| 0.588846
|
984fa43367f697857f003286e10ce3717f663f9d
| 2,198
|
py
|
Python
|
flaskblog/models.py
|
oecorrechag/Block
|
b65152dc768ece64ea93535ade5e8c66c3ab9de4
|
[
"MIT"
] | null | null | null |
flaskblog/models.py
|
oecorrechag/Block
|
b65152dc768ece64ea93535ade5e8c66c3ab9de4
|
[
"MIT"
] | null | null | null |
flaskblog/models.py
|
oecorrechag/Block
|
b65152dc768ece64ea93535ade5e8c66c3ab9de4
|
[
"MIT"
] | 1
|
2021-01-04T20:13:06.000Z
|
2021-01-04T20:13:06.000Z
|
from datetime import datetime
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
from flaskblog import db, login_manager
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
bio = db.Column(db.String(500), nullable=True, default=' ')
facebook = db.Column(db.String(120), nullable=True, default=' ')
twitter = db.Column(db.String(120), nullable=True, default=' ')
password = db.Column(db.String(60), nullable=True, default=' ')
posts = db.relationship('Post', backref='author', lazy=True)
def get_reset_token(self, expires_sec=1800):
s = Serializer(current_app.config['SECRET_KEY'], expires_sec)
return s.dumps({'user_id': self.id}).decode('utf-8')
@staticmethod
def verify_reset_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
user_id = s.loads(token)['user_id']
except:
return None
return User.query.get(user_id)
def __repr__(self):
return f"User('{self.username}', '{self.email}', '{self.image_file}', '{self.bio}', '{self.facebook}', '{self.twitter}')"
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), nullable=False)
date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
content = db.Column(db.Text, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
def __repr__(self):
return f"Post('{self.title}', '{self.date_posted}')"
class Todo(db.Model):
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.String(200), nullable=False)
date_created = db.Column(db.DateTime, default=datetime.utcnow)
def __repr__(self):
        return '<Task %r>' % self.id
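# Hedged usage sketch (an addition; assumes an application context and an
# initialized database, e.g. db.create_all() inside the Flask app factory):
#
#     user = User(username='demo', email='demo@example.com', password='hash')
#     db.session.add(user)
#     post = Post(title='Hello', content='First post', author=user)
#     db.session.add(post)
#     db.session.commit()
#     token = user.get_reset_token()
#     assert User.verify_reset_token(token).id == user.id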
| 37.896552
| 129
| 0.684258
|
5514ccd3fbf17b760414beb73ebb3b9a603b6beb
| 1,362
|
bzl
|
Python
|
tensorflow/core/platform/build_config_root.bzl
|
joshz123/tensorflow
|
7841ca029060ab78e221e757d4b1ee6e3e0ffaa4
|
[
"Apache-2.0"
] | 57
|
2017-09-03T07:08:31.000Z
|
2022-02-28T04:33:42.000Z
|
tensorflow/core/platform/build_config_root.bzl
|
sagol/tensorflow
|
04f2870814d2773e09dcfa00cbe76a66a2c4de88
|
[
"Apache-2.0"
] | 203
|
2019-06-14T23:53:10.000Z
|
2022-02-10T02:27:23.000Z
|
tensorflow/core/platform/build_config_root.bzl
|
sagol/tensorflow
|
04f2870814d2773e09dcfa00cbe76a66a2c4de88
|
[
"Apache-2.0"
] | 66
|
2020-05-15T10:05:12.000Z
|
2022-02-14T07:28:18.000Z
|
"""Provides a redirection point for platform specific implementations of starlark utilities."""
load(
"//tensorflow/core/platform/default:build_config_root.bzl",
_if_dynamic_kernels = "if_dynamic_kernels",
_if_static = "if_static",
_if_static_and_not_mobile = "if_static_and_not_mobile",
_register_extension_info = "register_extension_info",
_tf_additional_grpc_deps_py = "tf_additional_grpc_deps_py",
_tf_additional_license_deps = "tf_additional_license_deps",
_tf_additional_plugin_deps = "tf_additional_plugin_deps",
_tf_additional_xla_deps_py = "tf_additional_xla_deps_py",
_tf_cuda_tests_tags = "tf_cuda_tests_tags",
_tf_exec_properties = "tf_exec_properties",
_tf_gpu_tests_tags = "tf_gpu_tests_tags",
_tf_sycl_tests_tags = "tf_sycl_tests_tags",
)
if_dynamic_kernels = _if_dynamic_kernels
if_static = _if_static
if_static_and_not_mobile = _if_static_and_not_mobile
register_extension_info = _register_extension_info
tf_additional_grpc_deps_py = _tf_additional_grpc_deps_py
tf_additional_license_deps = _tf_additional_license_deps
tf_additional_plugin_deps = _tf_additional_plugin_deps
tf_additional_xla_deps_py = _tf_additional_xla_deps_py
tf_cuda_tests_tags = _tf_cuda_tests_tags
tf_exec_properties = _tf_exec_properties
tf_gpu_tests_tags = _tf_gpu_tests_tags
tf_sycl_tests_tags = _tf_sycl_tests_tags
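# Hedged usage sketch (an addition, not part of the original .bzl): other
# Bazel files load the re-exported symbols from this wrapper instead of the
# platform-specific default implementation, e.g.:
#
#   load(
#       "//tensorflow/core/platform:build_config_root.bzl",
#       "if_static",
#       "tf_cuda_tests_tags",
#   )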
| 43.935484
| 95
| 0.839207
|
862fd7bc00c84c2491fe78077bc4c94dc3c96519
| 475
|
py
|
Python
|
tests/python/inheritance_shadow.py
|
rtburns-jpl/pyre
|
ffc4fc1b2936e355f709d084eb4055954960b3a2
|
[
"BSD-3-Clause"
] | null | null | null |
tests/python/inheritance_shadow.py
|
rtburns-jpl/pyre
|
ffc4fc1b2936e355f709d084eb4055954960b3a2
|
[
"BSD-3-Clause"
] | 1
|
2021-06-10T23:42:13.000Z
|
2021-06-10T23:42:13.000Z
|
tests/python/inheritance_shadow.py
|
jlmaurer/pyre
|
6af38a83621d7d6228d147b4bb94f97fbb10f6e2
|
[
"BSD-3-Clause"
] | 2
|
2020-08-31T18:07:52.000Z
|
2021-12-10T08:54:39.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2020 all rights reserved
#
"""
Verify that we understand how multiple inheritance shadows duplicate attribute names
"""
class Base1(object):
name = "base1"
class Base2(object):
name = "base2"
class Derived(Base1, Base2):
pass
def test():
d = Derived()
assert d.name == "base1"
return
# main
if __name__ == "__main__":
test()
# end of file
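# Hedged addition (not part of the original test): the shadowing asserted above
# follows Python's method resolution order, which can be inspected directly.
#
#     assert Derived.__mro__ == (Derived, Base1, Base2, object)
#     assert Derived().name == "base1"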
| 12.837838
| 84
| 0.631579
|
bb23a006d2da5148fed4918d4cec6935fdbaeb15
| 2,995
|
py
|
Python
|
tpdatasrc/tpgamefiles/scr/tpModifiers/practiced_spellcaster.py
|
dolio/TemplePlus
|
37446bb3d1fcbf460e611a4fcb2caff167e9ac08
|
[
"MIT"
] | 69
|
2015-05-05T14:09:25.000Z
|
2022-02-15T06:13:04.000Z
|
tpdatasrc/tpgamefiles/scr/tpModifiers/practiced_spellcaster.py
|
anatoliy-savchak/TemplePlus
|
50922bb14cc2d7dcf8fceeccf45c3b905c1b512f
|
[
"MIT"
] | 457
|
2015-05-01T22:07:45.000Z
|
2022-03-31T02:19:10.000Z
|
tpdatasrc/tpgamefiles/scr/tpModifiers/practiced_spellcaster.py
|
anatoliy-savchak/TemplePlus
|
50922bb14cc2d7dcf8fceeccf45c3b905c1b512f
|
[
"MIT"
] | 25
|
2016-02-04T21:19:53.000Z
|
2021-11-15T23:14:51.000Z
|
from templeplus.pymod import PythonModifier
from toee import *
import tpdp
import char_class_utils
print "Registering Practiced Spellcaster"
### Moved to new event type so that it can modify caster level without a specific spell
### This will be used e.g. for crafting
# def PracticedSpellcasterLevelModArcane(attachee, args, evt_obj):
# spell_packet = evt_obj.get_spell_packet()
# if spell_packet.is_divine_spell():
# return 0
# bonVal = 4
# cur_caster_lvl = spell_packet.caster_level
# cur_hd = attachee.hit_dice_num
# bonVal = min(4, cur_hd - cur_caster_lvl)
# if bonVal > 0:
# print "Practiced Spellcaster: Adding to caster level " + str(bonVal)
# evt_obj.return_val += bonVal
# return 0
# def PracticedSpellcasterLevelModDivine(attachee, args, evt_obj):
# spell_packet = evt_obj.get_spell_packet()
# if not spell_packet.is_divine_spell():
# return 0
# bonVal = 4
# cur_caster_lvl = spell_packet.caster_level
# cur_hd = attachee.hit_dice_num
# bonVal = min(4, cur_hd - cur_caster_lvl)
# if bonVal > 0:
# print "Practiced Spellcaster: Adding to caster level " + str(bonVal)
# evt_obj.return_val += bonVal
# return 0
def OnAddSpellCastingArcane(attachee, args, evt_obj):
# arg1 holds the Arcane class
if args.get_arg(1) == 0:
highestArcane = char_class_utils.GetHighestArcaneClass(attachee)
args.set_arg(1, highestArcane)
return 0
def OnAddSpellCastingDivine(attachee, args, evt_obj):
# arg1 holds the Divine class
if args.get_arg(1) == 0:
highestDivine = char_class_utils.GetHighestDivineClass(attachee)
args.set_arg(1, highestDivine)
return 0
def OnGetBaseCasterLevel2(attachee, args, evt_obj):
classEnum = args.get_arg(1)
if evt_obj.arg0 != classEnum:
return 0
cur_caster_lvl = evt_obj.bonus_list.get_total()
cur_hd = attachee.hit_dice_num
bon_val = min(4, cur_hd - cur_caster_lvl)
if bon_val > 0:
evt_obj.bonus_list.add(bon_val, 0, "Practiced Spellcaster")
return 0
# arg0 - featEnum (autoset by engine)
# arg1 - set to chosen classEnum
pracSC_Arcane = PythonModifier("Practiced Spellcaster Feat - Arcane", 2) # args are just-in-case placeholders
pracSC_Arcane.MapToFeat("Practiced Spellcaster - Arcane")
# pracSC_Arcane.AddHook(ET_OnGetCasterLevelMod, EK_NONE, PracticedSpellcasterLevelModArcane, ())
pracSC_Arcane.AddHook(ET_OnConditionAdd, EK_NONE, OnAddSpellCastingArcane, ())
pracSC_Arcane.AddHook(ET_OnSpellCasterGeneral, EK_SPELL_Base_Caster_Level_2, OnGetBaseCasterLevel2, ())
pracSC_Divine = PythonModifier("Practiced Spellcaster Feat - Divine", 2) # args are just-in-case placeholders
pracSC_Divine.MapToFeat("Practiced Spellcaster - Divine")
# pracSC_Divine.AddHook(ET_OnGetCasterLevelMod, EK_NONE, PracticedSpellcasterLevelModDivine, ())
pracSC_Divine.AddHook(ET_OnConditionAdd, EK_NONE, OnAddSpellCastingDivine, ())
pracSC_Divine.AddHook(ET_OnSpellCasterGeneral, EK_SPELL_Base_Caster_Level_2, OnGetBaseCasterLevel2, ())
| 36.52439
| 109
| 0.75626
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.