Dataset columns (type, observed min to max):
- hexsha: string (length 40)
- size: int64 (3 to 1.03M)
- ext: string (10 distinct values)
- lang: string (1 distinct value)
- max_stars_repo_path: string (length 3 to 972)
- max_stars_repo_name: string (length 6 to 130)
- max_stars_repo_head_hexsha: string (length 40 to 78)
- max_stars_repo_licenses: list (length 1 to 10)
- max_stars_count: int64 (1 to 191k, nullable)
- max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
- max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
- max_issues_repo_path: string (length 3 to 972)
- max_issues_repo_name: string (length 6 to 130)
- max_issues_repo_head_hexsha: string (length 40 to 78)
- max_issues_repo_licenses: list (length 1 to 10)
- max_issues_count: int64 (1 to 116k, nullable)
- max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
- max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
- max_forks_repo_path: string (length 3 to 972)
- max_forks_repo_name: string (length 6 to 130)
- max_forks_repo_head_hexsha: string (length 40 to 78)
- max_forks_repo_licenses: list (length 1 to 10)
- max_forks_count: int64 (1 to 105k, nullable)
- max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
- max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
- content: string (length 3 to 1.03M)
- avg_line_length: float64 (1.13 to 941k)
- max_line_length: int64 (2 to 941k)
- alphanum_fraction: float64 (0 to 1)
hexsha: 069f2df5fd11d359a16791fa25f850a8150ff903 | size: 1,394 | ext: py | lang: Python
max_stars_repo: src/manage.py @ MelvinYin/Defined_Proteins (head 75da20be82a47d85d27176db29580ab87d52b670), licenses ["BSD-3-Clause"], stars: 2, events: 2021-01-05T02:55:57.000Z to 2021-04-16T15:49:08.000Z
max_issues_repo: src/manage.py @ MelvinYin/Defined_Proteins (head 75da20be82a47d85d27176db29580ab87d52b670), licenses ["BSD-3-Clause"], issues: null, events: null
max_forks_repo: src/manage.py @ MelvinYin/Defined_Proteins (head 75da20be82a47d85d27176db29580ab87d52b670), licenses ["BSD-3-Clause"], forks: 1, events: 2021-01-05T08:12:38.000Z to 2021-01-05T08:12:38.000Z
content:
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
from config import paths
def main(*args, **kwargs):
paths.initialise(os.path.join(os.path.dirname(os.path.realpath(__file__)), "__main__"))
assert os.path.isfile(paths.SEARCH_EXEC), paths.SEARCH_EXEC
# import subprocess
# from config import paths
# search_input = os.path.join(paths.SEARCH_DIR, "extracted.matrix")
# search_output = os.path.join(paths.SEARCH_DIR, "output.txt")
# pdb_seq_file = paths.RCSB_SEQS_FASTA
# # assert os.path.isfile(desired)
# assert os.path.isfile(search_input)
# assert os.path.isfile(pdb_seq_file)
# command = f"{desired} {search_input} {pdb_seq_file} " \
# f"{search_output} 30"
# ret_code = subprocess.run(command, shell=True)
# print(ret_code)
# import sys
# sys.exit()
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'site_main.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
avg_line_length: 35.74359 | max_line_length: 91 | alphanum_fraction: 0.683644

hexsha: 6ad093ece279fae4e396df89e6b940c68ddf0642 | size: 14,249 | ext: py | lang: Python
max_stars_repo: tensorflow_probability/python/sts/dynamic_regression.py @ SiegeLordEx/probability (head 1a12ee773199bec0ad6d49333e268f98e4b731be), licenses ["Apache-2.0"], stars: 1, events: 2021-08-23T17:17:52.000Z to 2021-08-23T17:17:52.000Z
max_issues_repo: tensorflow_probability/python/sts/dynamic_regression.py @ SiegeLordEx/probability (head 1a12ee773199bec0ad6d49333e268f98e4b731be), licenses ["Apache-2.0"], issues: null, events: null
max_forks_repo: tensorflow_probability/python/sts/dynamic_regression.py @ SiegeLordEx/probability (head 1a12ee773199bec0ad6d49333e268f98e4b731be), licenses ["Apache-2.0"], forks: null, events: null
content:
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Dynamic Linear Regression model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.sts.internal import util as sts_util
from tensorflow_probability.python.sts.structural_time_series import Parameter
from tensorflow_probability.python.sts.structural_time_series import StructuralTimeSeries
class DynamicLinearRegressionStateSpaceModel(tfd.LinearGaussianStateSpaceModel):
"""State space model for a dynamic linear regression from provided covariates.
A state space model (SSM) posits a set of latent (unobserved) variables that
evolve over time with dynamics specified by a probabilistic transition model
`p(z[t+1] | z[t])`. At each timestep, we observe a value sampled from an
observation model conditioned on the current state, `p(x[t] | z[t])`. The
special case where both the transition and observation models are Gaussians
with mean specified as a linear function of the inputs, is known as a linear
Gaussian state space model and supports tractable exact probabilistic
calculations; see `tfp.distributions.LinearGaussianStateSpaceModel` for
details.
The dynamic linear regression model is a special case of a linear Gaussian SSM
and a generalization of typical (static) linear regression. The model
represents regression `weights` with a latent state which evolves via a
Gaussian random walk:
```
weights[t] ~ Normal(weights[t-1], drift_scale)
```
The latent state (the weights) has dimension `num_features`, while the
parameters `drift_scale` and `observation_noise_scale` are each (a batch of)
scalars. The batch shape of this `Distribution` is the broadcast batch shape
of these parameters, the `initial_state_prior`, and the
`design_matrix`. `num_features` is determined from the last dimension of
`design_matrix` (equivalent to the number of columns in the design matrix in
linear regression).
#### Mathematical Details
The dynamic linear regression model implements a
`tfp.distributions.LinearGaussianStateSpaceModel` with `latent_size =
num_features` and `observation_size = 1` following the transition model:
```
transition_matrix = eye(num_features)
transition_noise ~ Normal(0, diag([drift_scale]))
```
which implements the evolution of `weights` described above. The observation
model is:
```
observation_matrix[t] = design_matrix[t]
observation_noise ~ Normal(0, observation_noise_scale)
```
#### Examples
Given `series1`, `series2` as `Tensors` each of shape `[num_timesteps]`
representing covariate time series, we create a dynamic regression model which
conditions on these via the following:
```python
dynamic_regression_ssm = DynamicLinearRegressionStateSpaceModel(
num_timesteps=42,
design_matrix=tf.stack([series1, series2], axis=-1),
drift_scale=3.14,
initial_state_prior=tfd.MultivariateNormalDiag(scale_diag=[1., 2.]),
observation_noise_scale=1.)
y = dynamic_regression_ssm.sample() # shape [42, 1]
lp = dynamic_regression_ssm.log_prob(y) # scalar
```
Passing additional parameter and `initial_state_prior` dimensions constructs a
batch of models; consider the following:
```python
dynamic_regression_ssm = DynamicLinearRegressionStateSpaceModel(
num_timesteps=42,
design_matrix=tf.stack([series1, series2], axis=-1),
drift_scale=[3.14, 1.],
initial_state_prior=tfd.MultivariateNormalDiag(scale_diag=[1., 2.]),
observation_noise_scale=[1., 2.])
y = dynamic_regression_ssm.sample(3) # shape [3, 2, 42, 1]
lp = dynamic_regression_ssm.log_prob(y) # shape [3, 2]
```
This (effectively) constructs two independent state space models; the first
with `drift_scale = 3.14` and `observation_noise_scale = 1.`, the second with
`drift_scale = 1.` and `observation_noise_scale = 2.`. We then sample from
each of the models three times and calculate the log probability of each of
the samples under each of the models.
Similarly, it is also possible to add batch dimensions via the
`design_matrix`.
"""
def __init__(self,
num_timesteps,
design_matrix,
drift_scale,
initial_state_prior,
observation_noise_scale=0.,
initial_step=0,
validate_args=False,
allow_nan_stats=True,
name=None):
"""State space model for a dynamic linear regression.
Args:
num_timesteps: Scalar `int` `Tensor` number of timesteps to model
with this distribution.
design_matrix: float `Tensor` of shape `concat([batch_shape,
[num_timesteps, num_features]])`.
drift_scale: Scalar (any additional dimensions are treated as batch
dimensions) `float` `Tensor` indicating the standard deviation of the
latent state transitions.
initial_state_prior: instance of `tfd.MultivariateNormal`
representing the prior distribution on latent states. Must have
event shape `[num_features]`.
observation_noise_scale: Scalar (any additional dimensions are
treated as batch dimensions) `float` `Tensor` indicating the standard
deviation of the observation noise.
Default value: `0.`.
initial_step: scalar `int` `Tensor` specifying the starting timestep.
Default value: `0`.
validate_args: Python `bool`. Whether to validate input with asserts. If
`validate_args` is `False`, and the inputs are invalid, correct behavior
is not guaranteed.
Default value: `False`.
allow_nan_stats: Python `bool`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
Default value: `True`.
name: Python `str` name prefixed to ops created by this class.
Default value: 'DynamicLinearRegressionStateSpaceModel'.
"""
parameters = dict(locals())
with tf.name_scope(
name or 'DynamicLinearRegressionStateSpaceModel') as name:
dtype = dtype_util.common_dtype(
[design_matrix, drift_scale, initial_state_prior])
design_matrix = tf.convert_to_tensor(
value=design_matrix, name='design_matrix', dtype=dtype)
design_matrix_with_time_in_first_dim = distribution_util.move_dimension(
design_matrix, -2, 0)
drift_scale = tf.convert_to_tensor(
value=drift_scale, name='drift_scale', dtype=dtype)
observation_noise_scale = tf.convert_to_tensor(
value=observation_noise_scale,
name='observation_noise_scale',
dtype=dtype)
num_features = prefer_static.shape(design_matrix)[-1]
def observation_matrix_fn(t):
observation_matrix = tf.linalg.LinearOperatorFullMatrix(
tf.gather(design_matrix_with_time_in_first_dim,
t)[..., tf.newaxis, :], name='observation_matrix')
return observation_matrix
self._drift_scale = drift_scale
self._observation_noise_scale = observation_noise_scale
super(DynamicLinearRegressionStateSpaceModel, self).__init__(
num_timesteps=num_timesteps,
transition_matrix=tf.linalg.LinearOperatorIdentity(
num_rows=num_features,
dtype=dtype,
name='transition_matrix'),
transition_noise=tfd.MultivariateNormalDiag(
scale_diag=(drift_scale[..., tf.newaxis] *
tf.ones([num_features], dtype=dtype)),
name='transition_noise'),
observation_matrix=observation_matrix_fn,
observation_noise=tfd.MultivariateNormalDiag(
scale_diag=observation_noise_scale[..., tf.newaxis],
name='observation_noise'),
initial_state_prior=initial_state_prior,
initial_step=initial_step,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def drift_scale(self):
"""Standard deviation of the drift in weights at each timestep."""
return self._drift_scale
@property
def observation_noise_scale(self):
"""Standard deviation of the observation noise."""
return self._observation_noise_scale
class DynamicLinearRegression(StructuralTimeSeries):
"""Formal representation of a dynamic linear regresson model.
The dynamic linear regression model is a special case of a linear Gaussian SSM
and a generalization of typical (static) linear regression. The model
represents regression `weights` with a latent state which evolves via a
Gaussian random walk:
```
weights[t] ~ Normal(weights[t-1], drift_scale)
```
The latent state has dimension `num_features`, while the parameters
`drift_scale` and `observation_noise_scale` are each (a batch of) scalars. The
batch shape of this `Distribution` is the broadcast batch shape of these
parameters, the `initial_state_prior`, and the `design_matrix`. `num_features`
is determined from the last dimension of `design_matrix` (equivalent to the
number of columns in the design matrix in linear regression).
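#### Example
A minimal usage sketch, assuming `series1` and `series2` are float `Tensors` of
shape `[num_timesteps]` (hypothetical covariates, as in the state space model
examples above):
```python
dynamic_regression = DynamicLinearRegression(
design_matrix=tf.stack([series1, series2], axis=-1),
name='dynamic_regression')
```
The component can then be combined with others and fit through the usual
`tfp.sts` workflow (e.g. `tfp.sts.Sum([...], observed_time_series=...)`).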
"""
def __init__(self,
design_matrix,
drift_scale_prior=None,
initial_weights_prior=None,
observed_time_series=None,
name=None):
"""Specify a dynamic linear regression.
Args:
design_matrix: float `Tensor` of shape `concat([batch_shape,
[num_timesteps, num_features]])`.
drift_scale_prior: instance of `tfd.Distribution` specifying a prior on
the `drift_scale` parameter. If `None`, a heuristic default prior is
constructed based on the provided `observed_time_series`.
Default value: `None`.
initial_weights_prior: instance of `tfd.MultivariateNormal` representing
the prior distribution on the latent states (the regression weights).
Must have event shape `[num_features]`. If `None`, a weakly-informative
Normal(0., 10.) prior is used.
Default value: `None`.
observed_time_series: `float` `Tensor` of shape `batch_shape + [T, 1]`
(omitting the trailing unit dimension is also supported when `T > 1`),
specifying an observed time series. Any priors not explicitly set will
be given default values according to the scale of the observed time
series (or batch of time series). May optionally be an instance of
`tfp.sts.MaskedTimeSeries`, which includes a mask `Tensor` to specify
timesteps with missing observations.
Default value: `None`.
name: Python `str` for the name of this component.
Default value: 'DynamicLinearRegression'.
"""
with tf.name_scope(name or 'DynamicLinearRegression') as name:
dtype = dtype_util.common_dtype(
[design_matrix, drift_scale_prior, initial_weights_prior])
num_features = prefer_static.shape(design_matrix)[-1]
# Default to a weakly-informative Normal(0., 10.) for the initial state
if initial_weights_prior is None:
initial_weights_prior = tfd.MultivariateNormalDiag(
scale_diag=10. * tf.ones([num_features], dtype=dtype))
# Heuristic default priors. Overriding these may dramatically
# change inference performance and results.
if drift_scale_prior is None:
if observed_time_series is None:
observed_stddev = tf.constant(1.0, dtype=dtype)
else:
_, observed_stddev, _ = sts_util.empirical_statistics(
observed_time_series)
drift_scale_prior = tfd.LogNormal(
loc=tf.math.log(.05 * observed_stddev),
scale=3.,
name='drift_scale_prior')
self._initial_state_prior = initial_weights_prior
self._design_matrix = design_matrix
super(DynamicLinearRegression, self).__init__(
parameters=[
Parameter('drift_scale', drift_scale_prior,
tfb.Chain([tfb.AffineScalar(scale=observed_stddev),
tfb.Softplus()]))
],
latent_size=num_features,
name=name)
@property
def initial_state_prior(self):
"""Prior distribution on the initial latent state (level and scale)."""
return self._initial_state_prior
@property
def design_matrix(self):
"""Tensor representing the design matrix."""
return self._design_matrix
def _make_state_space_model(self,
num_timesteps,
param_map,
initial_state_prior=None,
initial_step=0):
if initial_state_prior is None:
initial_state_prior = self.initial_state_prior
return DynamicLinearRegressionStateSpaceModel(
num_timesteps=num_timesteps,
design_matrix=self.design_matrix,
initial_state_prior=initial_state_prior,
initial_step=initial_step,
**param_map)
avg_line_length: 40.945402 | max_line_length: 89 | alphanum_fraction: 0.701312

hexsha: 7ad747de729f09460990896765f7421acaed8f22 | size: 23,813 | ext: py | lang: Python
max_stars_repo: abs_templates_ec/laygo/tech.py @ aalto-ele/BAG2_TEMPLATES_EC (head f04c4367234eb2261397b58fac11a79bdf5bee04), licenses ["BSD-3-Clause"], stars: null, events: null
max_issues_repo: abs_templates_ec/laygo/tech.py @ aalto-ele/BAG2_TEMPLATES_EC (head f04c4367234eb2261397b58fac11a79bdf5bee04), licenses ["BSD-3-Clause"], issues: 1, events: 2021-03-11T12:07:32.000Z to 2021-03-15T07:42:03.000Z
max_forks_repo: abs_templates_ec/laygo/tech.py @ aalto-ele/BAG2_TEMPLATES_EC (head f04c4367234eb2261397b58fac11a79bdf5bee04), licenses ["BSD-3-Clause"], forks: null, events: null
content:
# -*- coding: utf-8 -*-
"""This module defines abstract analog mosfet template classes.
"""
from typing import Dict, Any, Tuple, List, TYPE_CHECKING
from bag.layout.util import BBox
from bag.layout.template import TemplateBase
from bag.layout.routing import WireArray
import abc
from ..analog_mos.core import MOSTech
from ..analog_mos.mos import AnalogMOSExt
from ..analog_mos.edge import AnalogEdge
if TYPE_CHECKING:
from bag.layout.tech import TechInfoConfig
from .base import LaygoEndRow
from .core import LaygoBaseInfo
class LaygoTech(MOSTech, metaclass=abc.ABCMeta):
"""An abstract class for drawing transistor related layout for custom digital circuits.
This class defines various methods used to draw layouts for LaygoBase.
Parameters
----------
config : Dict[str, Any]
the technology configuration dictionary.
tech_info : TechInfo
the TechInfo object.
mos_entry_name : str
name of the entry that contains technology parameters for transistors in
the given configuration dictionary.
"""
def __init__(self, config, tech_info, mos_entry_name='mos'):
# type: (Dict[str, Any], TechInfoConfig, str) -> None
MOSTech.__init__(self, config, tech_info, mos_entry_name=mos_entry_name)
@abc.abstractmethod
def get_default_end_info(self):
# type: () -> Any
"""Returns the default end_info object."""
return 0
@abc.abstractmethod
def get_laygo_mos_row_info(self, # type: LaygoTech
lch_unit, # type: int
w_max, # type: int
w_sub, # type: int
mos_type, # type: str
threshold, # type: str
bot_row_type, # type: str
top_row_type, # type: str
**kwargs):
# type: (...) -> Dict[str, Any]
"""Returns the information dictionary for laygo transistor row.
Parameters
----------
lch_unit : int
the channel length in resolution units.
w_max : int
the maximum transistor width in number of fins/resolution units.
Must be greater than or equal to w_sub.
w_sub : int
the substrate width in number of fins/resolution units.
mos_type : str
the transistor/substrate type. One of 'pch', 'nch', 'ptap', or 'ntap'.
threshold : str
the transistor threshold flavor.
bot_row_type : str
the bottom (next to gate) laygo row type.
top_row_type: str
the top (next to drain/source) laygo row type.
**kwargs
optional keyword arguments.
Returns
-------
row_info : Dict[str, Any]
the row information dictionary. Must have the following entries:
w_max : int
maximum transistor width in this row.
w_sub : int
substrate width in this row.
lch_unit : int
the channel length.
row_type : str
the row transistor/substrate type.
sub_type : str
the row substrate type.
threshold : str
the threshold flavor.
arr_y : Tuple[int, int]
the array box Y interval.
od_y : Tuple[int, int]
the worst case OD Y interval.
po_y : Tuple[int, int]
the PO Y interval.
md_y : Tuple[int, int]
the worst case MD Y interval.
ext_top_info : Any
an object used to compute extension layout above this row.
ext_bot_info : Any
an object used to compute extension layout below this row.
lay_info_list : List[Any]
the default layer information list.
imp_params : Any
the default implant parameters of this row.
fill_info_list : List[Any]
the fill information list.
g_conn_y : Tuple[int, int]
the gate connection Y coordinates.
gb_conn_y : Tuple[int, int]
the gate-bar connection Y coordinates.
ds_conn_y : Tuple[int, int]
the drain-source connection Y coordinates.
row_name_id : str
the name ID for this row.
"""
return {}
@abc.abstractmethod
def get_laygo_sub_row_info(self, lch_unit, w, mos_type, threshold, **kwargs):
# type: (int, int, str, str, **kwargs) -> Dict[str, Any]
"""Returns the information dictionary for laygo substrate row.
Parameters
----------
lch_unit : int
the channel length in resolution units.
w : int
the substrate width in number of fins/resolution units.
mos_type : str
the transistor/substrate type. One of 'pch', 'nch', 'ptap', or 'ntap'.
threshold : str
the transistor threshold flavor.
**kwargs
optional keyword arguments
Returns
-------
row_info : Dict[str, Any]
the row information dictionary. Must have the following entries:
w_max : int
maximum transistor width in this row.
w_sub : int
substrate width in this row.
lch_unit : int
the channel length.
row_type : str
the row transistor/substrate type.
sub_type : str
the row substrate type.
threshold : str
the threshold flavor.
arr_y : Tuple[int, int]
the array box Y interval.
od_y : Tuple[int, int]
the worst case OD Y interval.
po_y : Tuple[int, int]
the PO Y interval.
md_y : Tuple[int, int]
the worst case MD Y interval.
ext_top_info : Any
an object used to compute extension layout above this row.
ext_bot_info : Any
an object used to compute extension layout below this row.
lay_info_list : List[Any]
the default layer information list.
imp_params : Any
the default implant parameters of this row.
fill_info_list : List[Any]
the fill information list.
g_conn_y : Tuple[int, int]
the gate connection Y coordinates.
gb_conn_y : Tuple[int, int]
the gate-bar connection Y coordinates.
ds_conn_y : Tuple[int, int]
the drain-source connection Y coordinates.
row_name_id : str
the name ID for this row.
"""
return {}
@abc.abstractmethod
def get_laygo_blk_info(self, blk_type, w, row_info, **kwargs):
# type: (str, int, Dict[str, Any], **kwargs) -> Dict[str, Any]
"""Returns the layout information dictionary for the given laygo block.
Parameters
----------
blk_type : str
the laygo block type.
w : int
the transistor width.
row_info : Dict[str, Any]
the row layout information object.
**kwargs
optional keyword arguments.
Returns
-------
blk_info : Dict[str, Any]
the block information dictionary. Contains the following entries:
layout_info : Dict[str, Any]
the block layout information dictionary.
ext_top_info : Any
an object used to compute extension layout above this block.
ext_bot_info : Any
an object used to compute extension layout below this block.
left_edge_info : Any
an object used to compute layout on the left of this block.
right_edge_info : Any
an object used to compute layout on the right of this block.
"""
return {}
@abc.abstractmethod
def get_laygo_end_info(self, lch_unit, mos_type, threshold, fg, is_end, blk_pitch, **kwargs):
# type: (int, str, str, int, bool, int, **kwargs) -> Dict[str, Any]
"""Returns the LaygoBase end row layout information dictionary.
Parameters
----------
lch_unit : int
the channel length in resolution units.
mos_type : str
the transistor type, one of 'pch', 'nch', 'ptap', or 'ntap'.
threshold : str
the substrate threshold type.
fg : int
total number of fingers.
is_end : bool
True if no block abuts the bottom.
blk_pitch : int
height quantization pitch, in resolution units.
**kwargs :
optional parameters.
Returns
-------
end_info : Dict[str, Any]
the laygo end row information dictionary.
"""
return {}
@abc.abstractmethod
def get_laygo_space_info(self, row_info, num_blk, left_blk_info, right_blk_info):
# type: (Dict[str, Any], int, Any, Any) -> Dict[str, Any]
"""Returns a new layout information dictionary for drawing LaygoBase space blocks.
Parameters
----------
row_info : Dict[str, Any]
the Laygo row information dictionary.
num_blk : int
number of space blocks.
left_blk_info : Any
left block information.
right_blk_info : Any
right block information.
Returns
-------
space_info : Dict[str, Any]
the space layout information dictionary.
"""
pass
@abc.abstractmethod
def get_row_extension_info(self, bot_ext_list, top_ext_list):
# type: (List[Any], List[Any]) -> List[Tuple[int, int, Any, Any]]
"""Compute the list of bottom/top extension information pair to create Laygo extension row.
Parameters
----------
bot_ext_list : List[Any]
list of bottom extension information objects.
top_ext_list : List[Any]
list of top extension information objects.
Returns
-------
ext_combo_list : List[Tuple[int, int, Any, Any]]
list of number of fingers and bottom/top extension information objects for each
extension primitive.
"""
return []
@abc.abstractmethod
def draw_laygo_space_connection(self, template, space_info, left_blk_info, right_blk_info):
# type: (TemplateBase, Dict[str, Any], Any, Any) -> None
"""Draw any space geometries necessary in the given template.
Parameters
----------
template : TemplateBase
the TemplateBase object to draw layout in.
space_info : Dict[str, Any]
the laygo space block information dictionary.
left_blk_info : Any
left block information.
right_blk_info : Any
right block information.
"""
pass
@abc.abstractmethod
def draw_laygo_connection(self, template, blk_info, blk_type, options):
# type: (TemplateBase, Dict[str, Any], str, Dict[str, Any]) -> None
"""Draw digital transistor connection in the given template.
Parameters
----------
template : TemplateBase
the TemplateBase object to draw layout in.
blk_info : Dict[str, Any]
the laygo block information dictionary.
blk_type : str
the digital block type.
options : Dict[str, Any]
any additional connection options.
"""
pass
def get_laygo_fg2d_s_short(self):
# type: () -> bool
"""Returns True if the two source wires of fg2d is shorted together in the primitive.
Returns
-------
s_short : bool
True if the two source wires of fg2d are shorted together
"""
return self.mos_config['laygo_fg2d_s_short']
def can_short_ds_conn_layer(self):
# type: () -> bool
"""
Returns True if the drain and source ports on dig_conn_layer of adjacent rows,
with a gate connection in the middle, can be shorted without violating spacing to the gate connections.
"""
return self.mos_config.get('laygo_short_ds_conn_layer', True)
def get_sub_columns(self, lch_unit):
# type: (int) -> int
"""Returns the number of columns per substrate block.
Parameters
----------
lch_unit : int
the channel length in resolution units.
Returns
-------
num_cols : int
number of columns per substrate block.
"""
return self.get_mos_tech_constants(lch_unit)['laygo_sub_ncol']
def get_sub_port_columns(self, lch_unit):
# type: (int) -> List[int]
"""Returns the columns indices that have ports in substrate block.
Parameters
----------
lch_unit : int
the channel length in resolution units.
Returns
-------
port_cols : List[int]
the columns indices that have ports in substrate block.
"""
return self.get_mos_tech_constants(lch_unit)['laygo_sub_port_cols']
def get_min_sub_space_columns(self, lch_unit):
# type: (int) -> int
"""Returns the minimum number of space columns needed around substrate blocks.
Parameters
----------
lch_unit : int
the channel length in resolution units.
Returns
-------
num_cols : int
minimum number of space columns.
"""
return self.get_mos_tech_constants(lch_unit)['laygo_sub_spx']
def get_laygo_conn_track_info(self, lch_unit):
# type: (int) -> Tuple[int, int]
"""Returns dummy connection layer space and width.
Parameters
----------
lch_unit : int
channel length in resolution units.
Returns
-------
dum_sp : int
space between dummy tracks in resolution units.
dum_w : int
width of dummy tracks in resolution units.
"""
mos_constants = self.get_mos_tech_constants(lch_unit)
sd_pitch = mos_constants['sd_pitch']
laygo_conn_w = mos_constants['laygo_conn_w']
laygo_num_sd_per_track = mos_constants['laygo_num_sd_per_track']
return sd_pitch * laygo_num_sd_per_track - laygo_conn_w, laygo_conn_w
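# Worked example with hypothetical tech constants: sd_pitch=90,
# laygo_num_sd_per_track=2, laygo_conn_w=40 gives (90 * 2 - 40, 40) = (140, 40),
# i.e. 140 units of space between 40-unit-wide dummy connection tracks.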
def draw_extensions(self, # type: LaygoTech
template, # type: TemplateBase
laygo_info, # type: LaygoBaseInfo
num_cols, # type: int
w, # type: int
yext, # type: int
bot_ext_list, # type: List[Tuple[int, Any]]
top_ext_list, # type: List[Tuple[int, Any]]
):
# type: (...) -> Tuple[Any, Any]
"""Draw extension rows in the given LaygoBase/DigitalBase template.
Parameters
----------
template : TemplateBase
the LaygoBase/DigitalBase object to draw layout in.
laygo_info : LaygoBaseInfo
the LaygoBaseInfo object.
num_cols : int
number of columns.
w : int
extension width in number of mos pitches.
yext : int
Y coordinate of the extension block.
bot_ext_list : List[Tuple[int, Any]]
list of tuples of end finger index and bottom extension information
top_ext_list : List[Tuple[int, Any]]
list of tuples of end finger index and top extension information
Returns
-------
edgesl : Optional[Tuple[int, str, Dict[str, Any]]]
a tuple of Y coordinate, orientation, and parameters for left edge.
edgesr : Optional[Tuple[int, str, Dict[str, Any]]]
a tuple of Y coordinate, orientation, and parameters for right edge.
"""
lch = laygo_info.lch
top_layer = laygo_info.top_layer
guard_ring_nf = laygo_info.guard_ring_nf
ext_groups = self.get_row_extension_info(bot_ext_list, top_ext_list)
edgesl, edgesr = None, None
if w > 0 or self.draw_zero_extension():
for idx, (fg_off, fg, bot_info, top_info) in enumerate(ext_groups):
ext_params = dict(
lch=lch,
w=w,
fg=fg,
top_ext_info=top_info,
bot_ext_info=bot_info,
is_laygo=True,
)
curx = laygo_info.col_to_coord(fg_off, unit_mode=True)
ext_master = template.new_template(params=ext_params, temp_cls=AnalogMOSExt)
template.add_instance(ext_master, loc=(curx, yext), unit_mode=True)
if fg_off == 0:
adj_blk_info = ext_master.get_left_edge_info()
# compute edge parameters
cur_ext_edge_params = dict(
top_layer=top_layer,
guard_ring_nf=guard_ring_nf,
name_id=ext_master.get_layout_basename(),
layout_info=ext_master.get_edge_layout_info(),
adj_blk_info=adj_blk_info,
is_laygo=True,
)
edgesl = (yext, cur_ext_edge_params)
if fg_off + fg == num_cols:
adj_blk_info = ext_master.get_right_edge_info()
# compute edge parameters
cur_ext_edge_params = dict(
top_layer=top_layer,
guard_ring_nf=guard_ring_nf,
name_id=ext_master.get_layout_basename(),
layout_info=ext_master.get_edge_layout_info(),
adj_blk_info=adj_blk_info,
is_laygo=True,
)
edgesr = (yext, cur_ext_edge_params)
return edgesl, edgesr
def draw_boundaries(self, # type: LaygoTech
template, # type: TemplateBase
laygo_info, # type: LaygoBaseInfo
num_col, # type: int
yt, # type: int
bot_end_master, # type: LaygoEndRow
top_end_master, # type: LaygoEndRow
edgel_infos, # type: List[Tuple[int, str, Dict[str, Any]]]
edger_infos, # type: List[Tuple[int, str, Dict[str, Any]]]
):
# type: (...) -> Tuple[BBox, List[WireArray], List[WireArray]]
"""Draw boundaries for LaygoBase/DigitalBase.
Parameters
----------
template : TemplateBase
the LaygoBase/DigitalBase object to draw layout in.
laygo_info : LaygoBaseInfo
the LaygoBaseInfo object.
num_col : int
number of primitive columns in the template.
yt : int
the top Y coordinate of the template. Used to determine top end row placement.
bot_end_master: LaygoEndRow
the bottom LaygoEndRow master.
top_end_master : LaygoEndRow
the top LaygoEndRow master.
edgel_infos: List[Tuple[int, str, Dict[str, Any]]]
a list of Y coordinate, orientation, and parameters for left edge blocks.
edger_infos: List[Tuple[int, str, Dict[str, Any]]]
a list of Y coordinate, orientation, and parameters for right edge blocks.
Returns
-------
arr_box : BBox
the array box.
vdd_warrs : List[WireArray]
any VDD wires in the edge block due to guard ring.
vss_warrs : List[WireArray]
any VSS wires in the edge block due to guard ring.
"""
end_mode = laygo_info.end_mode
guard_ring_nf = laygo_info.guard_ring_nf
col_width = laygo_info.col_width
nx = num_col
spx = col_width
emargin_l, emargin_r = laygo_info.edge_margins
ewidth_l, ewidth_r = laygo_info.edge_widths
xoffset = emargin_l + ewidth_l
# draw top and bottom end row
inst = template.add_instance(bot_end_master, inst_name='XRBOT', loc=(xoffset, 0),
nx=nx, spx=spx, unit_mode=True)
arr_box = inst.array_box
inst = template.add_instance(top_end_master, inst_name='XRBOT', loc=(xoffset, yt),
orient='MX', nx=nx, spx=spx, unit_mode=True)
arr_box = arr_box.merge(inst.array_box)
# draw corners
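# end_mode is a bit field; from the checks below, bit value 4 marks the left
# boundary as an end and bit value 8 marks the right (presumably the lower bits
# cover the bottom/top ends handled by the end-row masters).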
left_end = (end_mode & 4) != 0
right_end = (end_mode & 8) != 0
edge_inst_list = []
xr = laygo_info.tot_width
for orient, y, master in (('R0', 0, bot_end_master), ('MX', yt, top_end_master)):
for x, is_end, flip_lr in ((emargin_l, left_end, False),
(xr - emargin_r, right_end, True)):
edge_params = dict(
is_end=is_end,
guard_ring_nf=guard_ring_nf,
adj_blk_info=master.get_left_edge_info(),
name_id=master.get_layout_basename(),
layout_info=master.get_edge_layout_info(),
is_laygo=True,
)
edge_master = template.new_template(params=edge_params, temp_cls=AnalogEdge)
if flip_lr:
eorient = 'MY' if orient == 'R0' else 'R180'
else:
eorient = orient
edge_inst_list.append(template.add_instance(edge_master, orient=eorient,
loc=(x, y), unit_mode=True))
# draw edge blocks
for y, flip_ud, edge_params in edgel_infos:
orient = 'MX' if flip_ud else 'R0'
edge_master = template.new_template(params=edge_params, temp_cls=AnalogEdge)
edge_inst_list.append(template.add_instance(edge_master, orient=orient,
loc=(emargin_l, y), unit_mode=True))
for y, flip_ud, edge_params in edger_infos:
orient = 'R180' if flip_ud else 'MY'
edge_master = template.new_template(params=edge_params, temp_cls=AnalogEdge)
edge_inst_list.append(template.add_instance(edge_master, orient=orient,
loc=(xr - emargin_r, y), unit_mode=True))
gr_vss_warrs = []
gr_vdd_warrs = []
conn_layer = self.get_dig_conn_layer()
for inst in edge_inst_list:
if inst.has_port('VDD'):
gr_vdd_warrs.extend(inst.get_all_port_pins('VDD', layer=conn_layer))
elif inst.has_port('VSS'):
gr_vss_warrs.extend(inst.get_all_port_pins('VSS', layer=conn_layer))
arr_box = arr_box.merge(inst.array_box)
# connect body guard rings together
gr_vdd_warrs = template.connect_wires(gr_vdd_warrs)
gr_vss_warrs = template.connect_wires(gr_vss_warrs)
arr_box_x = laygo_info.get_placement_info(num_col).arr_box_x
arr_box = BBox(arr_box_x[0], arr_box.bottom_unit, arr_box_x[1], arr_box.top_unit,
arr_box.resolution, unit_mode=True)
return arr_box, gr_vdd_warrs, gr_vss_warrs
avg_line_length: 38.284566 | max_line_length: 113 | alphanum_fraction: 0.56003

hexsha: 63cc1f2b8c5a1407ab0f09d67528271867f53190 | size: 1,915 | ext: py | lang: Python
max_stars_repo: utils/process_log.py @ AnastaciaVolkova/SFND_2D_Feature_Tracking (head 9e7c343eeb78950c69e8067c9e6d37843805c8b5), licenses ["MIT"], stars: null, events: null
max_issues_repo: utils/process_log.py @ AnastaciaVolkova/SFND_2D_Feature_Tracking (head 9e7c343eeb78950c69e8067c9e6d37843805c8b5), licenses ["MIT"], issues: null, events: null
max_forks_repo: utils/process_log.py @ AnastaciaVolkova/SFND_2D_Feature_Tracking (head 9e7c343eeb78950c69e8067c9e6d37843805c8b5), licenses ["MIT"], forks: null, events: null
content:
import pickle
import pandas as pd
import numpy as np
def key_points_info(data):
data_x = dict()
detectors = list()
for record in data:
if record[1] is not None:
det, des = record[0].split("_")
if det not in detectors:
for metric, column in record[1]["detector"].items():
data_x[(det, metric)] = column
data_det = pd.DataFrame.from_dict(data_x)
data_det.to_csv("detectors.csv", index=False)
def matchers_info(data):
data_x = list()
for record in data:
if record[1] is not None:
det, des = record[0].split("_")
for metric, column in record[1]["matcher"].items():
data_x.append(((det, des, metric), column))
data_x.sort(key=lambda x: x[0][0])
data_x = dict((x, y) for x, y in data_x)
data_det = pd.DataFrame.from_dict(data_x)
data_det.to_csv("matchers.csv", index=False)
def det_des_time_info(data):
data_x = dict()
for record in data:
if record[1] is not None:
det, des = record[0].split("_")
data_x[(record[0], f"detector {det}, ms")] = record[1]["detector"]["t"]
data_x[(record[0], f"descriptor {des}, ms")] = record[1]["descriptor"]["t"]
data_tms = pd.DataFrame.from_dict(data_x)
print(data_tms)
data_tms.to_csv("timing.csv", index=False)
det_des_timing = list()
data_tms_cols = list(set(x[0] for x in data_tms.columns))
for det_des in data_tms_cols:
mx = data_tms[det_des].to_numpy()
det_des_timing.append((det_des, np.mean(mx.sum(axis=1))))
det_des_timing.sort(key=lambda x: x[1])
for comb, t in det_des_timing:
print(f"{comb},{t}")
def main():
with open("logs.pk", "rb") as fid:
data = pickle.load(fid)
key_points_info(data)
matchers_info(data)
det_des_time_info(data)
if __name__ == "__main__":
main()
avg_line_length: 29.921875 | max_line_length: 87 | alphanum_fraction: 0.601044

hexsha: eaad4856eb0883dea9d5d2a29769337d6b9c9f12 | size: 7,325 | ext: py | lang: Python
max_stars_repo: tests/integration/suite/test_node.py @ fizzyduck/rancher (head d8099d85f22a6e46fd93f8fc406df8cf91ccb12f), licenses ["Apache-2.0"], stars: 1, events: 2019-10-10T06:50:35.000Z to 2019-10-10T06:50:35.000Z
max_issues_repo: tests/integration/suite/test_node.py @ fizzyduck/rancher (head d8099d85f22a6e46fd93f8fc406df8cf91ccb12f), licenses ["Apache-2.0"], issues: null, events: null
max_forks_repo: tests/integration/suite/test_node.py @ fizzyduck/rancher (head d8099d85f22a6e46fd93f8fc406df8cf91ccb12f), licenses ["Apache-2.0"], forks: null, events: null
content:
import base64
import hashlib
import os
import tempfile
import pytest
from rancher import ApiError
from .common import auth_check
from .conftest import wait_for
import time
def test_node_fields(admin_mc):
cclient = admin_mc.client
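# Expected permissions for each schema field, checked by auth_check below
# (letter flags, presumably c=create, r=read, u=update).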
fields = {
'annotations': 'cru',
'labels': 'cru',
'nodeTaints': 'r',
'namespaceId': 'cr',
'conditions': 'r',
'allocatable': 'r',
'capacity': 'r',
'hostname': 'r',
'info': 'r',
'ipAddress': 'r',
'externalIpAddress': 'r',
'limits': 'r',
'publicEndpoints': 'r',
'nodePoolId': 'r',
'nodeName': 'r',
'requested': 'r',
'clusterId': 'cr',
'etcd': 'cr',
'controlPlane': 'cr',
'worker': 'cr',
'requestedHostname': 'cr',
'volumesAttached': 'r',
'nodeTemplateId': 'cr',
'volumesInUse': 'r',
'podCidr': 'r',
'podCidrs': 'r',
'name': 'cru',
'taints': 'ru',
'unschedulable': 'r',
'providerId': 'r',
'sshUser': 'r',
'imported': 'cru',
'dockerInfo': 'r',
}
for name in cclient.schema.types['node'].resourceFields.keys():
if name.endswith("Config"):
fields[name] = 'cr'
fields['customConfig'] = 'cru'
auth_check(cclient.schema, 'node', 'crud', fields)
def test_node_template_delete(admin_mc, remove_resource):
"""Test deleting a nodeTemplate that is in use by a nodePool.
The nodeTemplate should not be deleted while in use; after the nodePool is
removed it should delete.
"""
client = admin_mc.client
node_template, cloud_credential = create_node_template(client)
node_pool = client.create_node_pool(
nodeTemplateId=node_template.id,
hostnamePrefix="test1",
clusterId="local")
# node_pool needs to come first or the API will stop the delete if the
# template still exists
remove_resource(node_pool)
remove_resource(node_template)
assert node_pool.nodeTemplateId == node_template.id
# Attempting to delete the template should raise an ApiError
with pytest.raises(ApiError) as e:
client.delete(node_template)
assert e.value.error.status == 405
# remove link should not be available
node_template = client.reload(node_template)
assert 'remove' not in node_template.links
client.delete(node_pool)
def _node_pool_reload():
np = client.reload(node_pool)
return np is None
wait_for(_node_pool_reload)
node_template = client.reload(node_template)
assert 'remove' in node_template.links
# NodePool is gone, template should delete
client.delete(node_template)
node_template = client.reload(node_template)
assert node_template is None
def test_cloud_credential_delete(admin_mc, remove_resource):
"""Test deleting a cloud credential that is referenced by nodeTemplate, which
is in use by nodePool
"""
client = admin_mc.client
node_template, cloud_credential = create_node_template(client)
node_pool = client.create_node_pool(
nodeTemplateId=node_template.id,
hostnamePrefix="test1",
clusterId="local")
assert node_pool.nodeTemplateId == node_template.id
wait_for_node_template(client, node_template.id)
# Attempting to delete the template should raise an ApiError
with pytest.raises(ApiError) as e:
client.delete(cloud_credential)
assert e.value.error.status == 405
def test_writing_config_to_disk(admin_mc, wait_remove_resource):
"""Test that userdata and other fields from node driver configs are being
written to disk as expected.
"""
client = admin_mc.client
tempdir = tempfile.gettempdir()
cloud_credential = client.create_cloud_credential(
digitaloceancredentialConfig={"accessToken": "test"})
wait_remove_resource(cloud_credential)
userdata = "do cool stuff"
def _node_template():
try:
return client.create_node_template(
digitaloceanConfig={'userdata': userdata},
name='danssweetassthing',
cloudCredentialId=cloud_credential.id)
except ApiError:
return False
node_template = wait_for(_node_template,
fail_handler=lambda:
'failed to create node template')
wait_remove_resource(node_template)
node_pool = client.create_node_pool(
nodeTemplateId=node_template.id,
hostnamePrefix="test1",
clusterId="local")
wait_remove_resource(node_pool)
dir_name = string_to_encoding(userdata)
full_path = os.path.join(tempdir, dir_name, 'userdata')
def file_exists():
try:
os.stat(full_path)
return True
except FileNotFoundError:
return False
wait_for(file_exists)
with open(full_path, 'r') as f:
contents = f.read()
assert contents == userdata
def test_node_driver_schema(admin_mc):
"""Test node driver schemas have path fields removed."""
drivers = ['amazonec2config', 'digitaloceanconfig', 'azureconfig']
bad_fields = ['sshKeypath', 'sshKeyPath', 'existingKeyPath']
client = admin_mc.client
for driver in drivers:
schema = client.schema.types[driver]
for field in bad_fields:
assert field not in schema.resourceFields, \
'Driver {} has field {}'.format(driver, field)
def create_node_template(client):
cloud_credential = client.create_cloud_credential(
azurecredentialConfig={"clientId": "test",
"subscriptionId": "test",
"clientSecret": "test"})
wait_for_cloud_credential(client, cloud_credential.id)
node_template = client.create_node_template(
azureConfig={},
cloudCredentialId=cloud_credential.id)
assert node_template.cloudCredentialId == cloud_credential.id
return node_template, cloud_credential
def wait_for_cloud_credential(client, cloud_credential_id, timeout=60):
start = time.time()
interval = 0.5
creds = client.list_cloud_credential()
cred = None
for val in creds:
if val["id"] == cloud_credential_id:
cred = val
while cred is None:
if time.time() - start > timeout:
print(cred)
raise Exception('Timeout waiting for cloud credential')
time.sleep(interval)
interval *= 2
creds = client.list_cloud_credential()  # refresh the list so a credential created after the first check is found
for val in creds:
if val["id"] == cloud_credential_id:
cred = val
return cred
def string_to_encoding(input):
m = hashlib.sha256()
m.update(bytes(input, 'utf-8'))
return base64.b32encode(m.digest())[:10].decode('utf-8')
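# Maps an arbitrary string (e.g. the userdata blob above) to a stable
# 10-character name: the first 10 characters of the base32-encoded SHA-256 digest.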
def wait_for_node_template(client, node_template_id, timeout=60):
start = time.time()
interval = 0.5
template = None
while template is None:
if time.time() - start > timeout:
raise Exception('Timeout waiting for node template lister')
time.sleep(interval)
interval *= 2
nodeTemplates = client.list_node_template()
for each_template in nodeTemplates:
if each_template["id"] == node_template_id:
template = each_template
avg_line_length: 30.648536 | max_line_length: 81 | alphanum_fraction: 0.640137

hexsha: 8da703c93ef18d2c6d5f0c19883a87eb208d0a24 | size: 15,920 | ext: py | lang: Python
max_stars_repo: vee/database.py @ immersionroom/vee (head 2c6f781dc96e9028f2446777b906ca37dc2f4299), licenses ["BSD-3-Clause"], stars: 6, events: 2017-11-05T02:44:10.000Z to 2021-07-14T19:10:56.000Z
max_issues_repo: vee/database.py @ immersionroom/vee (head 2c6f781dc96e9028f2446777b906ca37dc2f4299), licenses ["BSD-3-Clause"], issues: null, events: null
max_forks_repo: vee/database.py @ immersionroom/vee (head 2c6f781dc96e9028f2446777b906ca37dc2f4299), licenses ["BSD-3-Clause"], forks: 1, events: 2017-01-31T23:10:09.000Z to 2017-01-31T23:10:09.000Z
content:
import datetime
import os
import sqlite3
import shutil
import re
import six
from vee.utils import makedirs
from vee import log
_migrations = []
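# Migration registry: using `_migrations.append` as a decorator registers each
# schema migration below in definition order; Database._migrate applies any
# migration whose name is not yet recorded in the `migrations` table.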
@_migrations.append
def _create_initial_tables(con):
con.execute('''CREATE TABLE repositories (
id INTEGER PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT (datetime('now')),
fetched_at TIMESTAMP,
name TEXT UNIQUE NOT NULL,
path TEXT,
remote TEXT NOT NULL DEFAULT 'origin',
branch TEXT NOT NULL DEFAULT 'master',
is_default INTEGER NOT NULL DEFAULT 0
)''')
con.execute('''CREATE TRIGGER insert_default_repository
AFTER INSERT ON repositories
WHEN NEW.is_default
BEGIN
UPDATE repositories SET is_default = 0 WHERE id != NEW.id;
END
''')
con.execute('''CREATE TRIGGER update_default_repository
AFTER UPDATE OF is_default ON repositories
WHEN NEW.is_default != 0
BEGIN
UPDATE repositories SET is_default = 0 WHERE id != NEW.id;
END
''')
con.execute('''CREATE TABLE packages (
id INTEGER PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT (datetime('now')),
url TEXT NOT NULL,
name TEXT,
provides TEXT,
requires TEXT,
-- Names, either from the user or discovered.
package_name TEXT,
build_name TEXT,
install_name TEXT,
-- Paths for direct lookup.
package_path TEXT,
build_path TEXT,
install_path TEXT,
scanned_for_libraries INTEGER NOT NULL DEFAULT 0
)''')
con.execute('''CREATE TABLE environments (
id INTEGER PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT (datetime('now')),
modified_at TIMESTAMP NOT NULL DEFAULT (datetime('now')),
repository_id INTEGER REFERENCES repositories(id),
repository_commit TEXT,
name TEXT NOT NULL,
path TEXT NOT NULL,
-- Attributes parsed from original file.
version TEXT,
revision TEXT
)''')
con.execute('''CREATE TABLE links (
id INTEGER PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT (datetime('now')),
environment_id INTEGER REFERENCES environments(id) NOT NULL,
package_id INTEGER REFERENCES packages(id) NOT NULL
)''')
con.execute('''CREATE TRIGGER on_insert_links
AFTER INSERT ON links BEGIN
UPDATE environments SET modified_at = datetime('now') WHERE id = NEW.environment_id;
END
''')
con.execute('''CREATE TABLE development_packages (
id INTEGER PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT (datetime('now')),
name TEXT NOT NULL,
path TEXT NOT NULL,
environ TEXT NOT NULL DEFAULT "{}"
)''')
con.execute('''CREATE TABLE config (
id INTEGER PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT (datetime('now')),
name TEXT UNIQUE NOT NULL,
value TEXT NOT NULL
)''')
@_migrations.append
def _create_repos_path_column(con):
if 'path' not in con.columns('repositories'):
con.execute('''ALTER TABLE repositories ADD COLUMN path TEXT''')
@_migrations.append
def _create_packages_etag_column(con):
if 'etag' not in con.columns('packages'):
con.execute('''ALTER TABLE packages ADD COLUMN etag TEXT''')
@_migrations.append
def _created_shared_libraries(con):
if 'scanned_for_libraries' not in con.columns('packages'):
con.execute('''ALTER TABLE packages ADD COLUMN scanned_for_libraries INTEGER NOT NULL DEFAULT 0''')
con.execute('''CREATE TABLE shared_libraries (
id INTEGER PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT (datetime('now')),
package_id INTEGER REFERENCES packages(id) NOT NULL,
name TEXT NOT NULL, -- Mainly for searching.
path TEXT NOT NULL
)''')
@_migrations.append
def _rename_dev_packages(con):
if 'dev_packages' in con.tables():
con.execute('''ALTER TABLE dev_packages RENAME TO development_packages''')
@_migrations.append
def _drop_type_columns(con):
if 'package_type' in con.columns('packages'):
con.drop_column('packages', 'package_type')
if 'build_type' in con.columns('packages'):
con.drop_column('packages', 'build_type')
@_migrations.append
def _create_dependency_table(con):
con.execute('''CREATE TABLE package_dependencies (
id INTEGER PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT (datetime('now')),
depender_id INTEGER REFERENCES packages(id) NOT NULL,
dependee_id INTEGER REFERENCES packages(id) NOT NULL
)''')
@_migrations.append
def _drop_dev_packages(con):
if 'development_packages' in con.tables():
con.execute('''DROP TABLE development_packages''')
class _Row(sqlite3.Row):
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __getitem__(self, key):
try:
return super(_Row, self).__getitem__(key)
except IndexError:
if isinstance(key, str):
raise KeyError(key)
else:
raise
def __contains__(self, key):
try:
self[key]
return True
except (IndexError, KeyError):
return False
class _Connection(sqlite3.Connection):
def __init__(self, *args, **kwargs):
super(_Connection, self).__init__(*args, **kwargs)
self.row_factory = _Row
# We wish we could use unicode everywhere, but there are too many
# unknown codepaths for us to evaluate its safety. Python 3 would
# definitely help us here...
self.text_factory = str
self.isolation_level = None
self._context_depth = 0
def __enter__(self):
if not self._context_depth:
self.execute('BEGIN')
else:
self.execute('SAVEPOINT pycontext%d' % self._context_depth)
self._context_depth += 1
return self
def __exit__(self, type_, value, tb):
self._context_depth -= 1
if type_:
if not self._context_depth:
self.execute('ROLLBACK')
else:
self.execute('ROLLBACK TO pycontext%d' % self._context_depth)
else:
if not self._context_depth:
self.execute('COMMIT')
else:
self.execute('RELEASE pycontext%d' % self._context_depth)
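# Usage sketch (hypothetical calls): nested `with con:` blocks map the
# outermost level to BEGIN/COMMIT and inner levels to SAVEPOINT/RELEASE,
# so a failure inside an inner block rolls back only its own savepoint:
#
#     with con:                 # BEGIN
#         con.insert('packages', {'url': url})
#         with con:             # SAVEPOINT pycontext1
#             ...               # raises -> ROLLBACK TO pycontext1 only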
def cursor(self):
return super(_Connection, self).cursor(_Cursor)
def insert(self, *args, **kwargs):
return self.cursor().insert(*args, **kwargs)
def update(self, *args, **kwargs):
return self.cursor().update(*args, **kwargs)
def tables(self):
return [row['name'] for row in self.execute("SELECT name FROM sqlite_master WHERE type='table'")]
def schema(self, table_name):
return self.execute('SELECT sql FROM sqlite_master WHERE name = ?', [table_name]).fetchone()['sql']
def columns(self, table_name):
return [row['name'] for row in self.execute('PRAGMA table_info(%s)' % table_name)]
def drop_column(self, table_name, column_name):
old_columns = self.columns(table_name)
new_columns = [x for x in old_columns if x != column_name]
if new_columns == old_columns:
raise ValueError(column_name)
old_schema = self.schema(table_name)
new_schema = re.sub(r'\)\s*$', ',', old_schema)
new_schema = re.sub('%s[^,]+,' % column_name, '', new_schema)
new_schema = re.sub(r',$', ')', new_schema)
if new_schema == old_schema:
raise ValueError('no change in schema: %s' % new_schema)
self.execute('ALTER TABLE %s RENAME TO old_%s' % (table_name, table_name))
self.execute(new_schema)
self.execute('INSERT INTO %s (%s) SELECT %s FROM old_%s' % (
table_name, ','.join(new_columns), ','.join(new_columns), table_name
))
self.execute('DROP TABLE old_%s' % table_name)
def escape_identifier(x):
return '"%s"' % x.replace('"', '""')
class _Cursor(sqlite3.Cursor):
def insert(self, table, data, on_conflict=None):
pairs = sorted(data.items())
query = 'INSERT %s INTO %s (%s) VALUES (%s)' % (
'OR ' + on_conflict if on_conflict else '',
escape_identifier(table),
','.join(escape_identifier(k) for k, v in pairs),
','.join('?' for _ in pairs),
)
params = [v for k, v in pairs]
log.debug('%s %r' % (query, params))
self.execute(query, params)
return self.lastrowid
def update(self, table, data, where=None):
columns, params = zip(*sorted(data.items()))
if where:
where = sorted(where.items())
params = list(params)
params.extend(v for k, v in where)
where = 'WHERE %s' % ' AND '.join('%s = ?' % escape_identifier(k) for k, v in where)
self.execute('UPDATE %s SET %s %s' % (
escape_identifier(table),
', '.join('%s = ?' % escape_identifier(c) for c in columns),
where or '',
), params)
return self
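# Illustrative queries produced by the helpers above (values are always bound
# as parameters, never interpolated):
#
#     cursor.insert('packages', {'url': url, 'name': name}, on_conflict='REPLACE')
#       -> INSERT OR REPLACE INTO "packages" ("name","url") VALUES (?,?)
#     cursor.update('packages', {'etag': etag}, where={'id': 1})
#       -> UPDATE "packages" SET "etag" = ? WHERE "id" = ?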
class Database(object):
def __init__(self, path):
self.path = path
if self.exists:
self._migrate()
def _migrate(self, con=None):
did_backup = False
con = con or self.connect()
with con:
# We try to select without creating the table, so that we don't
# force the database to lock every time (which will fail if
# something else has an exclusive lock).
try:
cur = con.execute('SELECT name FROM migrations')
except sqlite3.OperationalError as e:
if e.args[0] != 'no such table: migrations':
raise
con.execute('''CREATE TABLE IF NOT EXISTS migrations (
name TEXT NOT NULL,
applied_at TIMESTAMP NOT NULL DEFAULT (datetime('now'))
)''')
existing = set()
else:
existing = set(row[0] for row in cur)
for f in _migrations:
name = f.__name__.strip('_')
if name not in existing:
if not did_backup:
self._backup()
did_backup = True
with con:
f(con)
con.execute('INSERT INTO migrations (name) VALUES (?)', [name])
def _backup(self):
backup_dir = os.path.join(os.path.dirname(self.path), 'backups')
backup_path = os.path.join(backup_dir, os.path.basename(self.path) + '.' + datetime.datetime.utcnow().isoformat('T'))
makedirs(backup_dir)
shutil.copyfile(self.path, backup_path)
@property
def exists(self):
return os.path.exists(self.path)
def create(self):
if self.exists:
raise ValueError('database already exists')
con = self.connect(create=True)
self._migrate(con)
def connect(self, create=False):
if not create and not self.exists:
raise ValueError('database does not exist; run `vee init`')
con = sqlite3.connect(self.path, factory=_Connection)
con.execute('PRAGMA foreign_keys = ON')
return con
def cursor(self):
return self.connect().cursor()
def execute(self, *args):
return self.connect().execute(*args)
def insert(self, *args, **kwargs):
return self.cursor().insert(*args, **kwargs)
def update(self, *args, **kwargs):
return self.cursor().update(*args, **kwargs)
class Column(object):
def __init__(self, name=None):
self.name = name
self._getter = self._setter = self._deleter = None
self._persist = self._restore = None
def copy(self):
copy = Column(self.name)
copy._getter = self._getter
copy._persist = self._persist
copy._restore = self._restore
return copy
def getter(self, func):
self._getter = func
return self
def persist(self, func):
self._persist = func
return self
def restore(self, func):
self._restore = func
return self
def __get__(self, obj, cls):
if self._getter:
return self._getter(obj)
try:
return obj.__dict__[self.name]
except KeyError:
raise AttributeError(self.name)
def __set__(self, obj, value):
obj.__dict__[self.name] = value
obj.is_dirty = True
def __delete__(self, obj):
raise RuntimeError("cannot delete DB columns")
class DBMetaclass(type):
def __new__(cls, name, bases, attrs):
table_name = attrs.get('__tablename__')
# Collect existing columns from bases.
columns = {}
for base in reversed(bases):
table_name = table_name or getattr(base, '__tablename__', None)
for col in getattr(base, '__columns__', []):
columns[col.name] = col.copy()
# Collect new columns.
for k, v in attrs.items():
# If this is now a property, but it was once a column, upgrade it
# to a column.
if isinstance(v, property):
col = columns.get(k)
if col:
col._getter = v.fget
if v.fset or v.fdel:
raise ValueError('cannot wrap properties with setters or deleters')
attrs[k] = col
v = col
if isinstance(v, Column):
v.name = v.name or k
columns[v.name] = v
attrs['__columns__'] = [v for _, v in sorted(columns.items())]
return super(DBMetaclass, cls).__new__(cls, name, bases, attrs)
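# Illustrative sketch (hypothetical subclass; the real ones live in other vee
# modules) of how Column and DBMetaclass combine on a DBObject:
#
#     class Package(DBObject):
#         __tablename__ = 'packages'
#         url = Column()
#         name = Column()
#
# The metaclass gathers the Column attributes into __columns__, which
# persist_in_db/restore_from_row below use to map attributes to table columns.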
@six.add_metaclass(DBMetaclass)
class DBObject(object):
def __init__(self, *args, **kwargs):
self.id = None
self.is_dirty = True
def _connect(self):
return self.home.db.connect()
def id_or_persist(self, *args, **kwargs):
return self.id or self.persist_in_db(*args, **kwargs)
def persist_in_db(self, con=None, force=False):
if not self.is_dirty and not force:
return self.id
data = {}
for col in self.__columns__:
try:
if col._persist:
data[col.name] = col._persist(self)
elif col._getter:
data[col.name] = col._getter(self)
else:
data[col.name] = self.__dict__[col.name]
except KeyError:
pass
con = con or self._connect()
if self.id:
con.update(self.__tablename__, data, {'id': self.id})
else:
self.id = con.insert(self.__tablename__, data)
log.debug('%s added to %s with ID %d' % (self.__class__.__name__, self.__tablename__, self.id))
self.is_dirty = False
return self.id
def restore_from_row(self, row, ignore=None):
try:
if self.id and self.id != row['id']:
log.warning('Restoring from a mismatched ID; %s %d != %d' % (self.__tablename__, self.id, row['id']))
self.id = row['id']
except KeyError:
pass
for col in self.__columns__:
try:
val = row[col.name]
except KeyError:
continue
if ignore and col.name in ignore:
continue
if col._restore:
col._restore(self, val)
else:
self.__dict__[col.name] = val
avg_line_length: 28.377897 | max_line_length: 125 | alphanum_fraction: 0.58191

hexsha: 404bad75969548a67643665b744d9c3a386e3a2f | size: 1,451 | ext: py | lang: Python
max_stars_repo: analysis/mixed/plot_cloud_all_storm.py @ coryschwartz/nebula-crawler (head 34ebe1109a5117949b4f285891a065adcc0bae08), licenses ["Apache-2.0"], stars: 66, events: 2021-07-05T21:55:27.000Z to 2022-03-20T20:44:38.000Z
max_issues_repo: analysis/mixed/plot_cloud_all_storm.py @ coryschwartz/nebula-crawler (head 34ebe1109a5117949b4f285891a065adcc0bae08), licenses ["Apache-2.0"], issues: 8, events: 2021-07-18T09:00:12.000Z to 2022-03-15T17:44:11.000Z
max_forks_repo: analysis/mixed/plot_cloud_all_storm.py @ coryschwartz/nebula-crawler (head 34ebe1109a5117949b4f285891a065adcc0bae08), licenses ["Apache-2.0"], forks: 6, events: 2021-07-11T12:25:05.000Z to 2022-01-04T21:14:50.000Z
content:
import psycopg2
import toml
import matplotlib.pyplot as plt
from lib import node_time, node_classification, node_agent, node_cloud
# Helper function to trim agent version
def trim_agent(agent):
if agent.startswith("/"):
agent = agent[1:]  # strip the leading "/" so the prefix checks below match the agent name
if agent.startswith("go-ipfs"):
return "go-ipfs"
elif agent.startswith("hydra-booster"):
return "hydra-booster"
elif agent.startswith("storm"):
return "storm"
elif agent.startswith("ioi"):
return "ioi"
else:
return "others"
config = toml.load("./db.toml")['psql']
conn = psycopg2.connect(
host=config['host'],
port=config['port'],
database=config['database'],
user=config['user'],
password=config['password'],
)
start, end = node_time.get_time_range(conn)
# Get storm node ids
all = node_classification.get_all_nodes(conn, start, end)
agents = node_agent.get_agent_version(conn, all)
storm = set()
for id, agent in agents.items():
agent = trim_agent(agent)
if agent == "storm":
storm.add(id)
clouds = node_cloud.get_cloud(conn, storm)
counts = dict()
for _, cloud in clouds.items():
if cloud in counts:
counts[cloud] += 1
else:
counts[cloud] = 1
# Plot
plt.rc('font', size=8)
plt.pie(counts.values(), labels=counts.keys(), autopct="%.1f%%")
plt.title("Storm nodes cloud info from %s to %s" % (start.replace(microsecond=0), end.replace(microsecond=0)))
plt.show()
| 25.45614
| 110
| 0.660924
|
e369556799923a5bb8759c482c0e7c915b90e690
| 4,757
|
py
|
Python
|
ACME/model/model.py
|
mauriziokovacic/ACME
|
2615b66dd4addfd5c03d9d91a24c7da414294308
|
[
"MIT"
] | 3
|
2019-10-23T23:10:55.000Z
|
2021-09-01T07:30:14.000Z
|
ACME/model/model.py
|
mauriziokovacic/ACME-Python
|
2615b66dd4addfd5c03d9d91a24c7da414294308
|
[
"MIT"
] | null | null | null |
ACME/model/model.py
|
mauriziokovacic/ACME-Python
|
2615b66dd4addfd5c03d9d91a24c7da414294308
|
[
"MIT"
] | 1
|
2020-07-11T11:35:43.000Z
|
2020-07-11T11:35:43.000Z
|
import warnings
import torch
from ..fileio.fileparts import *
from .freeze import *
from .unfreeze import *
class Model(torch.nn.Module):
"""
A class representing a generic model architecture
Attributes
----------
name : str
the name of the model
device : str or torch.device
the device to store the tensors to
Methods
-------
freeze()
freezes all the model parameters
unfreeze()
unfreezes all the model parameters
save_model(path)
stores the model state in the given path
load_model(path)
loads a model from the given path
save_checkpoint(path)
stores the model checkpoint in the given path
load_checkpoint(path)
loads the model checkpoint from the given path
"""
def __init__(self, name='Model', device='cuda:0', **kwargs):
"""
Parameters
----------
name : str (optional)
the name of the model (default is 'Model')
device : str or torch.device (optional)
the device to store the tensors to (default is 'cuda:0')
"""
super(Model, self).__init__()
self.name = name
self.__device = device
def freeze(self):
"""
Freezes all the model parameters
Returns
-------
Model
the model itself
"""
freeze(self)
return self
def unfreeze(self):
"""
Unfreezes all the model parameters
Returns
-------
Model
the model itself
"""
unfreeze(self)
return self
def save_model(self, filename):
"""
Stores the model state in the given filename.
The file extension will be forced to '.pth'
Parameters
----------
filename : str
the filename to store the model to
Returns
-------
Model
the model itself
"""
path, file = fileparts(filename)[:2]
torch.save(self, path + file + '.pth')
return self
def load_model(self, filename, strict=True):
"""
Loads a model from the given filename
The file extension will be forced to '.pth'
Parameters
----------
filename : str
the filename to load the model from
strict : bool (optional)
if True loaded model must completely match with this. If False only compatible parameters will be loaded
Returns
-------
Model
the model itself
"""
path, file = fileparts(filename)[:2]
other = torch.load(path + file + '.pth', map_location=self.device)
if strict and (not isinstance(other, self.__class__)):
warnings.warn('Class type mismatch. Loading model failed', category=RuntimeWarning)
return self
        self.load_state_dict(other.state_dict(), strict=strict)
self.eval()
return self
def save_checkpoint(self, filename):
"""
Stores the model checkpoint in the given filename
The file extension will be forced to '.tar'
Parameters
----------
filename : str
the filename to store the model checkpoint to
Returns
-------
Model
the model itself
"""
path, file = fileparts(filename)[:2]
torch.save({'model_state_dict': self.state_dict()}, path + file + '.tar')
return self
def load_checkpoint(self, filename, strict=True):
"""
Loads the model checkpoint from the given filename
The file extension will be forced to '.tar'
Parameters
----------
filename : str
the filename to load the model checkpoint from
strict : bool (optional)
if True loaded checkpoint must completely match with the model parameters.
If False only compatible parameters will be loaded
Returns
-------
Model
the model itself
"""
path, file = fileparts(filename)[:2]
        checkpoint = torch.load(path + file + '.tar', map_location=self.device)
        self.load_state_dict(checkpoint['model_state_dict'], strict=strict)
self.to(device=self.device)
self.train()
return self
@property
def device(self):
return self.__device
@device.setter
def device(self, value):
self.to(device=value)
def to(self, **kwargs):
if 'device' in kwargs:
self.__device = kwargs['device']
self = super(Model, self).to(**kwargs)
return self
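# Hedged usage sketch (not part of the original file): the intended save/load
# round trip. "MyNet" and the file names are hypothetical.
#
#   class MyNet(Model):
#       def __init__(self):
#           super(MyNet, self).__init__(name='MyNet', device='cpu')
#           self.fc = torch.nn.Linear(8, 2)
#
#   net = MyNet()
#   net.save_checkpoint('net')    # writes net.tar (state dict only)
#   net.load_checkpoint('net')    # restores the weights and calls train()
#   net.save_model('net')         # writes net.pth (the whole module)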
| 25.713514
| 116
| 0.559596
|
9f395608eaed2d63e107c7cb5f81fa82744ffd89
| 32,733
|
py
|
Python
|
DetVisGUI_test.py
|
carranza96/DetVisGUI
|
84554abf938e2b22380f2cbb14afe11a76b4b1af
|
[
"MIT"
] | 127
|
2020-05-20T06:14:13.000Z
|
2022-03-31T08:13:06.000Z
|
DetVisGUI_test.py
|
carranza96/DetVisGUI
|
84554abf938e2b22380f2cbb14afe11a76b4b1af
|
[
"MIT"
] | 12
|
2020-10-12T17:36:51.000Z
|
2022-03-31T07:50:23.000Z
|
DetVisGUI_test.py
|
carranza96/DetVisGUI
|
84554abf938e2b22380f2cbb14afe11a76b4b1af
|
[
"MIT"
] | 18
|
2020-07-29T08:23:12.000Z
|
2022-02-26T02:40:11.000Z
|
# __author__ = 'ChienHung Chen in Academia Sinica IIS'
import argparse
import itertools
import json
import os
import pickle
import xml.etree.ElementTree as ET
from tkinter import (END, Button, Checkbutton, E, Entry, IntVar, Label,
Listbox, Menu, N, S, Scrollbar, StringVar, Tk, W, ttk)
import cv2
import matplotlib
import mmcv
from mmdet.apis import init_detector, inference_detector
import numpy as np
import platform
import pycocotools.mask as maskUtils
from PIL import Image, ImageTk
matplotlib.use('TkAgg')
def parse_args():
parser = argparse.ArgumentParser(description='DetVisGUI')
parser.add_argument('config',
default='./config/mask_rcnn_r50_fpn_1x_coco.py',
help='config file path')
parser.add_argument('ckpt',
default='./checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth',
help='checkpoint file path')
parser.add_argument('img_root',
default='./data/test_images',
help='test image path')
parser.add_argument('--device', default='cuda', help='inference device')
parser.add_argument(
'--no_gt',
default=True,
help='test images without groundtruth')
parser.add_argument(
'--det_box_color', default=(255, 255, 0), help='detection box color')
parser.add_argument(
'--gt_box_color',
default=(255, 255, 255),
help='groundtruth box color')
parser.add_argument('--output', default='output', help='image save folder')
args = parser.parse_args()
return args
class COCO_dataset:
def __init__(self, cfg, args):
self.dataset = 'COCO'
self.img_root = args.img_root
self.config_file = args.config
self.checkpoint_file = args.ckpt
self.mask = False
self.device = args.device
        # build the image list; the COCO categories are hard-coded below (no annotation json in this test-only version).
self.img_list = self.get_img_list()
# coco categories
self.aug_category = aug_category([
'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush'
])
def get_img_list(self):
img_list = list()
for image in sorted(os.listdir(self.img_root)):
img_list.append(image)
return img_list
def get_img_by_name(self, name):
img = Image.open(os.path.join(self.img_root, name)).convert('RGB')
return img
def get_img_by_index(self, idx):
img = Image.open(os.path.join(self.img_root,
self.img_list[idx])).convert('RGB')
return img
# main GUI
class vis_tool:
def __init__(self):
self.args = parse_args()
cfg = mmcv.Config.fromfile(self.args.config)
self.window = Tk()
self.menubar = Menu(self.window)
self.info = StringVar()
self.info_label = Label(
self.window, bg='yellow', width=4, textvariable=self.info)
self.listBox_img = Listbox(
self.window, width=50, height=25, font=('Times New Roman', 10))
self.listBox_obj = Listbox(
self.window, width=50, height=12, font=('Times New Roman', 10))
self.scrollbar_img = Scrollbar(
self.window, width=15, orient='vertical')
self.scrollbar_obj = Scrollbar(
self.window, width=15, orient='vertical')
self.listBox_img_info = StringVar()
self.listBox_img_label = Label(
self.window,
font=('Arial', 11),
bg='yellow',
width=4,
height=1,
textvariable=self.listBox_img_info)
self.listBox_obj_info = StringVar()
self.listBox_obj_label1 = Label(
self.window,
font=('Arial', 11),
bg='yellow',
width=4,
height=1,
textvariable=self.listBox_obj_info)
self.listBox_obj_label2 = Label(
self.window,
font=('Arial', 11),
bg='yellow',
width=4,
height=1,
text='Object Class : Score')
self.data_info = COCO_dataset(cfg, self.args)
self.info.set('DATASET: {}'.format(self.data_info.dataset))
# load image and show it on the window
self.img = self.data_info.get_img_by_index(0)
self.photo = ImageTk.PhotoImage(self.img)
self.label_img = Label(self.window, image=self.photo)
self.show_det_txt = IntVar(value=1)
self.checkbn_det_txt = Checkbutton(
self.window,
text='Text',
font=('Arial', 10, 'bold'),
variable=self.show_det_txt,
command=self.change_img,
fg='#0000FF')
self.show_dets = IntVar(value=1)
self.checkbn_det = Checkbutton(
self.window,
text='Detections',
font=('Arial', 10, 'bold'),
variable=self.show_dets,
command=self.change_img,
fg='#0000FF')
self.combo_label = Label(
self.window,
bg='yellow',
width=10,
height=1,
text='Show Category',
font=('Arial', 11))
self.combo_category = ttk.Combobox(
self.window,
font=('Arial', 11),
values=self.data_info.aug_category.combo_list)
self.combo_category.current(0)
self.th_label = Label(
self.window,
font=('Arial', 11),
bg='yellow',
width=10,
height=1,
text='Score Threshold')
self.threshold = np.float32(0.5)
self.th_entry = Entry(
self.window,
font=('Arial', 11),
width=10,
textvariable=StringVar(self.window, value=str(self.threshold)))
self.th_button = Button(
self.window, text='Enter', height=1, command=self.change_threshold)
self.find_label = Label(
self.window,
font=('Arial', 11),
bg='yellow',
width=10,
height=1,
text='find')
self.find_name = ''
self.find_entry = Entry(
self.window,
font=('Arial', 11),
width=10,
textvariable=StringVar(self.window, value=str(self.find_name)))
self.find_button = Button(
self.window, text='Enter', height=1, command=self.findname)
self.listBox_img_idx = 0
        # ====== other attributes ======
self.img_name = ''
self.show_img = None
self.output = self.args.output
self.model = init_detector(
self.data_info.config_file,
self.data_info.checkpoint_file,
device=self.data_info.device)
if not os.path.isdir(self.output):
os.makedirs(self.output)
self.img_list = self.data_info.img_list
        # flag used when the find/threshold buttons switch the focused widget
self.button_clicked = False
def change_threshold(self, event=None):
try:
self.threshold = np.float32(self.th_entry.get())
self.change_img()
# after changing threshold, focus on listBox for easy control
if self.window.focus_get() == self.listBox_obj:
self.listBox_obj.focus()
else:
self.listBox_img.focus()
self.button_clicked = True
except ValueError:
self.window.title('Please enter a number as score threshold.')
# draw groundtruth
def draw_gt_boxes(self, img, objs):
for obj in objs:
cls_name = obj[0]
            # use the combobox selection to decide whether to plot this category
if self.combo_category.get() == 'All':
show_category = self.data_info.aug_category.category
else:
show_category = [self.combo_category.get()]
if cls_name not in show_category:
continue
box = obj[1:]
xmin = max(box[0], 0)
ymin = max(box[1], 0)
xmax = min(box[0] + box[2], self.img_width)
ymax = min(box[1] + box[3], self.img_height)
font = cv2.FONT_HERSHEY_SIMPLEX
if self.show_gt_txt.get():
if ymax + 30 >= self.img_height:
cv2.rectangle(img, (xmin, ymin),
(xmin + len(cls_name) * 10, int(ymin - 20)),
(255, 140, 0), cv2.FILLED)
cv2.putText(img, cls_name, (xmin, int(ymin - 5)), font,
0.5, (255, 255, 255), 1)
else:
cv2.rectangle(img, (xmin, ymax),
(xmin + len(cls_name) * 10, int(ymax + 20)),
(255, 140, 0), cv2.FILLED)
cv2.putText(img, cls_name, (xmin, int(ymax + 15)), font,
0.5, (255, 255, 255), 1)
cv2.rectangle(img, (xmin, ymin), (xmax, ymax),
self.args.gt_box_color, 1)
return img
def draw_all_det_boxes(self, img, single_detection):
for idx, cls_objs in enumerate(single_detection):
category = self.data_info.aug_category.category[idx]
if self.combo_category.get() == 'All':
show_category = self.data_info.aug_category.category
else:
show_category = [self.combo_category.get()]
if category not in show_category:
continue
for obj_idx, obj in enumerate(cls_objs):
[score, box] = [round(obj[4], 2), obj[:4]]
if score >= self.threshold:
box = list(map(int, list(map(round, box))))
xmin = max(box[0], 0)
ymin = max(box[1], 0)
xmax = min(box[2], self.img_width)
ymax = min(box[3], self.img_height)
if self.show_det_txt.get():
font = cv2.FONT_HERSHEY_SIMPLEX
text = category + ' : ' + str(score)
if ymax + 30 >= self.img_height:
cv2.rectangle(
img, (xmin, ymin),
(xmin + len(text) * 9, int(ymin - 20)),
(0, 0, 255), cv2.FILLED)
cv2.putText(img, text, (xmin, int(ymin - 5)), font,
0.5, (255, 255, 255), 1)
else:
cv2.rectangle(
img, (xmin, ymax),
(xmin + len(text) * 9, int(ymax + 20)),
(0, 0, 255), cv2.FILLED)
cv2.putText(img, text, (xmin, int(ymax + 15)),
font, 0.5, (255, 255, 255), 1)
cv2.rectangle(
img, (xmin, ymin), (xmax, ymax),
self.args.det_box_color, 2)
return img
def draw_all_det_boxes_masks(self, img, single_detection):
img = np.require(img, requirements=['W'])
boxes, masks = single_detection
# draw segmentation masks
# reference mmdetection/mmdet/models/detectors/base.py
if self.combo_category.get() != 'All':
show_idx = self.data_info.aug_category.category.index(
self.combo_category.get())
masks = np.asarray([masks[show_idx]])
boxes = np.asarray([boxes[show_idx]])
category = self.data_info.aug_category.category[show_idx]
segms = list(itertools.chain(*masks))
bboxes = np.vstack(boxes)
inds = np.where(np.round(bboxes[:, -1], 2) >= self.threshold)[0]
self.color_list = []
for i in inds:
color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
if type(segms[0]) == np.ndarray:
mask = segms[i]
elif type(segms[0]) == dict:
mask = maskUtils.decode(segms[i]).astype(np.bool)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
self.color_list.append('#%02x%02x%02x' % tuple(color_mask[0]))
# draw bounding box
for idx, cls_objs in enumerate(boxes):
if self.combo_category.get() == 'All':
category = self.data_info.aug_category.category[idx]
for obj_idx, obj in enumerate(cls_objs):
[score, box] = [round(obj[4], 2), obj[:4]]
if score >= self.threshold:
box = list(map(int, list(map(round, box))))
xmin = max(box[0], 0)
ymin = max(box[1], 0)
xmax = min(box[2], self.img_width)
ymax = min(box[3], self.img_height)
if self.show_det_txt.get():
font = cv2.FONT_HERSHEY_SIMPLEX
text = category + ' : ' + str(score)
if ymax + 30 >= self.img_height:
cv2.rectangle(
img, (xmin, ymin),
(xmin + len(text) * 9, int(ymin - 20)),
(0, 0, 255), cv2.FILLED)
cv2.putText(img, text, (xmin, int(ymin - 5)), font,
0.5, (255, 255, 255), 1)
else:
cv2.rectangle(
img, (xmin, ymax),
(xmin + len(text) * 9, int(ymax + 20)),
(0, 0, 255), cv2.FILLED)
cv2.putText(img, text, (xmin, int(ymax + 15)),
font, 0.5, (255, 255, 255), 1)
cv2.rectangle(
img, (xmin, ymin), (xmax, ymax),
self.args.det_box_color, 2)
return img
def get_dets(self, det_results): # [(bg + cls), images]
det_results = np.asarray(det_results, dtype=object)
# dim should be (class, image), mmdetection format: (image, class)
if len(det_results.shape) == 2:
self.data_info.mask = True
return det_results
def change_img(self, event=None):
if len(self.listBox_img.curselection()) != 0:
self.listBox_img_idx = self.listBox_img.curselection()[0]
self.listBox_img_info.set('Image {:6} / {:6}'.format(
self.listBox_img_idx + 1, self.listBox_img.size()))
name = self.listBox_img.get(self.listBox_img_idx)
self.window.title('DATASET : ' + self.data_info.dataset + ' ' + name)
img = self.data_info.get_img_by_name(name)
self.img_width, self.img_height = img.width, img.height
img = np.asarray(img)
self.img_name = name
self.img = img
result = inference_detector(self.model, img)
self.dets = self.get_dets(result)
if self.show_dets.get():
if self.data_info.mask is False:
img = self.draw_all_det_boxes(img, self.dets)
else:
img = self.draw_all_det_boxes_masks(img, self.dets)
self.clear_add_listBox_obj()
self.show_img = img
img = Image.fromarray(img)
img = self.scale_img(img)
self.photo = ImageTk.PhotoImage(img)
self.label_img.config(image=self.photo)
self.window.update_idletasks()
if self.img_name in os.listdir(self.output):
self.listBox_img_label.config(bg='#CCFF99')
else:
self.listBox_img_label.config(bg='yellow')
def draw_one_det_boxes(self, img, single_detection, selected_idx=-1):
idx_counter = 0
for idx, cls_objs in enumerate(single_detection):
category = self.data_info.aug_category.category[idx]
if self.combo_category.get() == 'All':
show_category = self.data_info.aug_category.category
else:
show_category = [self.combo_category.get()]
if category not in show_category:
continue
for obj_idx, obj in enumerate(cls_objs):
[score, box] = [round(obj[4], 2), obj[:4]]
if score >= self.threshold:
if idx_counter == selected_idx:
box = list(map(int, list(map(round, box))))
xmin = max(box[0], 0)
ymin = max(box[1], 0)
xmax = min(box[2], self.img_width)
ymax = min(box[3], self.img_height)
if self.show_det_txt.get():
font = cv2.FONT_HERSHEY_SIMPLEX
text = category + ' : ' + str(score)
if ymax + 30 >= self.img_height:
cv2.rectangle(
img, (xmin, ymin),
(xmin + len(text) * 9, int(ymin - 20)),
(0, 0, 255), cv2.FILLED)
cv2.putText(img, text, (xmin, int(ymin - 5)),
font, 0.5, (255, 255, 255), 1)
else:
cv2.rectangle(
img, (xmin, ymax),
(xmin + len(text) * 9, int(ymax + 20)),
(0, 0, 255), cv2.FILLED)
cv2.putText(img, text, (xmin, int(ymax + 15)),
font, 0.5, (255, 255, 255), 1)
cv2.rectangle(
img, (xmin, ymin), (xmax, ymax),
self.args.det_box_color, 2)
return img
else:
idx_counter += 1
def draw_one_det_boxes_masks(self, img, single_detection, selected_idx=-1):
img = np.require(img, requirements=['W'])
boxes, masks = single_detection
# draw segmentation masks
# reference mmdetection/mmdet/models/detectors/base.py
if self.combo_category.get() != 'All':
show_idx = self.data_info.aug_category.category.index(
self.combo_category.get())
category = self.data_info.aug_category.category[
show_idx] # fixed category
masks = np.asarray([masks[show_idx]])
boxes = np.asarray([boxes[show_idx]])
segms = list(itertools.chain(*masks))
bboxes = np.vstack(boxes)
inds = np.where(np.round(bboxes[:, -1], 2) >= self.threshold)[0]
self.color_list = []
for inds_idx, i in enumerate(inds):
if inds_idx == selected_idx:
color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
if type(segms[0]) == np.ndarray:
mask = segms[i]
elif type(segms[0]) == dict:
mask = maskUtils.decode(segms[i]).astype(np.bool)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
self.color_list.append('#%02x%02x%02x' % tuple(color_mask[0]))
# draw bounding box
idx_counter = 0
for idx, cls_objs in enumerate(boxes):
if self.combo_category.get() == 'All':
category = self.data_info.aug_category.category[idx]
for obj_idx, obj in enumerate(cls_objs):
[score, box] = [round(obj[4], 2), obj[:4]]
if score >= self.threshold:
if idx_counter == selected_idx:
box = list(map(int, list(map(round, box))))
xmin = max(box[0], 0)
ymin = max(box[1], 0)
xmax = min(box[2], self.img_width)
ymax = min(box[3], self.img_height)
if self.show_det_txt.get():
font = cv2.FONT_HERSHEY_SIMPLEX
text = category + ' : ' + str(score)
if ymax + 30 >= self.img_height:
cv2.rectangle(
img, (xmin, ymin),
(xmin + len(text) * 9, int(ymin - 20)),
(0, 0, 255), cv2.FILLED)
cv2.putText(img, text, (xmin, int(ymin - 5)),
font, 0.5, (255, 255, 255), 1)
else:
cv2.rectangle(
img, (xmin, ymax),
(xmin + len(text) * 9, int(ymax + 20)),
(0, 0, 255), cv2.FILLED)
cv2.putText(img, text, (xmin, int(ymax + 15)),
font, 0.5, (255, 255, 255), 1)
cv2.rectangle(img, (xmin, ymin), (xmax, ymax),
self.args.det_box_color, 2)
return img
else:
idx_counter += 1
# plot only one object
def change_obj(self, event=None):
if len(self.listBox_obj.curselection()) == 0:
self.listBox_img.focus()
return
else:
listBox_obj_idx = self.listBox_obj.curselection()[0]
self.listBox_obj_info.set('Detected Object : {:4} / {:4}'.format(
listBox_obj_idx + 1, self.listBox_obj.size()))
name = self.listBox_img.get(self.listBox_img_idx)
img = self.data_info.get_img_by_name(name)
self.img_width, self.img_height = img.width, img.height
img = np.asarray(img)
self.img_name = name
self.img = img
if self.show_dets.get():
if self.data_info.mask is False:
img = self.draw_one_det_boxes(img, self.dets, listBox_obj_idx)
else:
img = self.draw_one_det_boxes_masks(img, self.dets, listBox_obj_idx)
self.show_img = img
img = Image.fromarray(img)
img = self.scale_img(img)
self.photo = ImageTk.PhotoImage(img)
self.label_img.config(image=self.photo)
self.window.update_idletasks()
if self.img_name in os.listdir(self.output):
self.listBox_img_label.config(bg='#CCFF99')
else:
self.listBox_img_label.config(bg='yellow')
# ============================================
def scale_img(self, img):
[s_w, s_h] = [1, 1]
# if window size is (1920, 1080),
# the default max image size is (1440, 810)
(fix_width, fix_height) = (1440, 810)
# change image size according to window size
if self.window.winfo_width() != 1:
fix_width = (
self.window.winfo_width() - self.listBox_img.winfo_width() -
self.scrollbar_img.winfo_width() - 5)
# fix_height = int(fix_width * 9 / 16)
fix_height = 750
# handle image size is too big
if img.width > fix_width:
s_w = fix_width / img.width
if img.height > fix_height:
s_h = fix_height / img.height
scale = min(s_w, s_h)
img = img.resize((int(img.width * scale), int(img.height * scale)),
Image.ANTIALIAS)
return img
def clear_add_listBox_obj(self):
self.listBox_obj.delete(0, 'end')
if self.data_info.mask is False:
single_detection = self.dets
else:
single_detection, single_mask = self.dets
if self.combo_category.get() == 'All':
show_category = self.data_info.aug_category.category
else:
show_category = [self.combo_category.get()]
num = 0
for idx, cls_objs in enumerate(single_detection):
category = self.data_info.aug_category.category[idx]
if category not in show_category:
continue
for obj_idx, obj in enumerate(cls_objs):
score = np.round(obj[4], 2)
if score >= self.threshold:
self.listBox_obj.insert('end', category + " : " + str(score))
num += 1
self.listBox_obj_info.set('Detected Object : {:3}'.format(num))
def change_threshold_button(self, v):
self.threshold += v
if self.threshold <= 0:
self.threshold = 0
elif self.threshold >= 1:
self.threshold = 1
self.th_entry.delete(0, END)
self.th_entry.insert(0, str(round(self.threshold, 2)))
self.change_threshold()
def save_img(self):
print('Save image to ' + os.path.join(self.output, self.img_name))
cv2.imwrite(
os.path.join(self.output, self.img_name),
cv2.cvtColor(self.show_img, cv2.COLOR_BGR2RGB))
self.listBox_img_label.config(bg='#CCFF99')
def eventhandler(self, event):
entry_list = [self.find_entry, self.th_entry]
if self.window.focus_get() not in entry_list:
if platform.system() == 'Windows':
state_1key = 8
state_2key = 12
else: # 'Linux'
state_1key = 16
state_2key = 20
if event.state == state_1key and event.keysym == 'Left':
self.change_threshold_button(-0.1)
elif event.state == state_1key and event.keysym == 'Right':
self.change_threshold_button(0.1)
elif event.keysym == 'q':
self.window.quit()
elif event.keysym == 's':
self.save_img()
if self.button_clicked:
self.button_clicked = False
else:
if event.keysym in ['KP_Enter', 'Return']:
self.listBox_obj.focus()
self.listBox_obj.select_set(0)
elif event.keysym == 'Escape':
self.change_img()
self.listBox_img.focus()
def combobox_change(self, event=None):
self.listBox_img.focus()
self.change_img()
def clear_add_listBox_img(self):
self.listBox_img.delete(0, 'end') # delete listBox_img 0 ~ end items
# add image name to listBox_img
for item in self.img_list:
self.listBox_img.insert('end', item)
self.listBox_img.select_set(0)
self.listBox_img.focus()
self.change_img()
def findname(self, event=None):
self.find_name = self.find_entry.get()
new_list = []
if self.find_name == '':
new_list = self.data_info.img_list
else:
for img_name in self.data_info.img_list:
if self.find_name[0] == '!':
if self.find_name[1:] not in img_name:
new_list.append(img_name)
else:
if self.find_name in img_name:
new_list.append(img_name)
if len(new_list) != 0:
self.img_list = new_list
self.clear_add_listBox_img()
self.clear_add_listBox_obj()
self.button_clicked = True
else:
self.window.title("Can't find any image about '{}'".format(
self.find_name))
def run(self):
self.window.title('DATASET : ' + self.data_info.dataset)
self.window.geometry('1280x800+350+100')
# self.menubar.add_command(label='QUIT', command=self.window.quit)
self.window.config(menu=self.menubar) # display the menu
self.scrollbar_img.config(command=self.listBox_img.yview)
self.listBox_img.config(yscrollcommand=self.scrollbar_img.set)
self.scrollbar_obj.config(command=self.listBox_obj.yview)
self.listBox_obj.config(yscrollcommand=self.scrollbar_obj.set)
layer1 = 0
layer2 = 50
# ======================= layer 1 =========================
# combobox
self.combo_label.grid(
row=layer1 + 30,
column=0,
sticky=W + E + N + S,
padx=3,
pady=3,
columnspan=6)
self.combo_category.grid(
row=layer1 + 30,
column=6,
sticky=W + E + N + S,
padx=3,
pady=3,
columnspan=6)
# show det
self.checkbn_det.grid(
row=layer1 + 40,
column=0,
sticky=N + S,
padx=3,
pady=3,
columnspan=4)
# show det text
self.checkbn_det_txt.grid(
row=layer1 + 40,
column=4,
sticky=N + S,
padx=3,
pady=3,
columnspan=2)
# ======================= layer 2 =========================
self.listBox_img_label.grid(
row=layer2 + 0, column=0, sticky=N + S + E + W, columnspan=12)
# find name
self.find_label.grid(
row=layer2 + 20, column=0, sticky=E + W, columnspan=4)
self.find_entry.grid(
row=layer2 + 20, column=4, sticky=E + W, columnspan=4)
self.find_button.grid(
row=layer2 + 20, column=8, sticky=E + W, pady=3, columnspan=4)
self.scrollbar_img.grid(row=layer2 + 30, column=11, sticky=N + S + W)
self.label_img.grid(
row=layer1 + 30,
column=12,
sticky=N + E,
padx=3,
pady=3,
rowspan=110)
self.listBox_img.grid(
row=layer2 + 30,
column=0,
sticky=N + S + E + W,
pady=3,
columnspan=11)
self.th_label.grid(
row=layer2 + 40, column=0, sticky=E + W, columnspan=6)
self.th_entry.grid(
row=layer2 + 40, column=6, sticky=E + W, columnspan=3)
self.th_button.grid(
row=layer2 + 40, column=9, sticky=E + W, columnspan=3)
self.listBox_obj_label1.grid(
row=layer2 + 60, column=0, sticky=E + W, pady=3, columnspan=12)
self.listBox_obj_label2.grid(
row=layer2 + 70,
column=0,
sticky=E + W,
pady=2,
columnspan=12)
self.scrollbar_obj.grid(
row=layer2 + 80, column=11, sticky=N + S + W, pady=3)
self.listBox_obj.grid(
row=layer2 + 80,
column=0,
sticky=N + S + E + W,
pady=3,
columnspan=11)
self.clear_add_listBox_img()
self.listBox_img.bind('<<ListboxSelect>>', self.change_img)
self.listBox_img.bind_all('<KeyRelease>', self.eventhandler)
self.listBox_obj.bind('<<ListboxSelect>>', self.change_obj)
self.th_entry.bind('<Return>', self.change_threshold)
self.th_entry.bind('<KP_Enter>', self.change_threshold)
self.find_entry.bind('<Return>', self.findname)
self.find_entry.bind('<KP_Enter>', self.findname)
self.combo_category.bind('<<ComboboxSelected>>', self.combobox_change)
self.window.mainloop()
class aug_category:
def __init__(self, categories):
self.category = categories
self.combo_list = categories.copy()
self.combo_list.insert(0, 'All')
self.all = True
if __name__ == '__main__':
vis_tool().run()
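# Hedged usage note (not part of the original script): an example command line,
# with hypothetical paths, matching the positional arguments defined in
# parse_args() above:
#
#   python DetVisGUI_test.py configs/mask_rcnn_r50_fpn_1x_coco.py \
#       checkpoints/mask_rcnn_r50_fpn_1x_coco.pth data/test_images \
#       --device cuda --output output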
| 35.852136
| 84
| 0.506584
|
b110ff9ffe5fad9ca568ee61d99837d5ceee0c08
| 2,884
|
py
|
Python
|
perplexity.py
|
Artemis-ii/LDA
|
496e43f875ef35084cfb8539ee8bfe4e4fe68fb7
|
[
"Apache-2.0"
] | null | null | null |
perplexity.py
|
Artemis-ii/LDA
|
496e43f875ef35084cfb8539ee8bfe4e4fe68fb7
|
[
"Apache-2.0"
] | null | null | null |
perplexity.py
|
Artemis-ii/LDA
|
496e43f875ef35084cfb8539ee8bfe4e4fe68fb7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 1 16:46:55 2018
@author: 月神少女
"""
import math
import matplotlib.pyplot as plt
def dictionary_found(wordlist): # Build a dict mapping each word produced by the model (key) to its probability (value).
word_dictionary1={}
for i in range(len(wordlist)):
if i%2==0:
if word_dictionary1.__contains__(wordlist[i])==True:
word_probability=word_dictionary1.get(wordlist[i])
word_probability=float(word_probability)
word_dictionary1.update({wordlist[i]:word_probability})
else:
word_dictionary1.update({wordlist[i]:wordlist[i+1]})
else:
pass
return word_dictionary1
def look_into_dic(dictionary,testset): # For every word in the test set, look up its probability in the dictionary.
    '''Sum the probabilities of the distinct test-set words found in the dictionary'''
frequency=[]
letter_list=[]
a=0.0
for letter in testset.split():
if letter not in letter_list:
letter_list.append(letter)
letter_frequency=(dictionary.get(letter))
frequency.append(letter_frequency)
else:
pass
for each in frequency:
if each!=None:
a+=float(each)
else:
pass
return a
def f_testset_word_count(testset): # Count the number of words in the test set
    '''Return the number of words in the test set, which is the denominator of the perplexity formula'''
testset_clean=testset.split()
return (len(testset_clean)-testset.count("\n"))
def f_perplexity(word_frequency,word_count): # Compute the perplexity
'''Search the probability of each word in dictionary
Calculates the perplexity of the LDA model for every parameter T'''
duishu=-math.log(word_frequency)
kuohaoli=duishu/word_count
perplexity=math.exp(kuohaoli)
return perplexity
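# Worked example for f_perplexity (illustrative numbers only): with
# word_frequency = 0.25 and word_count = 100,
# perplexity = exp(-ln(0.25) / 100) = exp(0.013863) ≈ 1.014.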
def graph_draw(topic,perplexity): # Plot perplexity against the number of topics
x=topic
y=perplexity
plt.plot(x,y,color="red",linewidth=2)
plt.xlabel("Number of Topic")
plt.ylabel("Perplexity")
plt.show()
topic=[i for i in range(10,110,10)]
perplexity_list=[]
f1=open('data/cart_text_cut.txt','r', encoding = 'utf-8') # path to the test set
testset=f1.read()
testset_word_count=f_testset_word_count(testset) #call the function to count the sum-words in testset
for i in topic:
dictionary={}
trace="data/result/topic_word_"+str(i)+".txt" #模型目录
f=open(trace,'r', encoding = 'utf-8')
text=f.readlines()
word_list=[]
for line in text:
if line.startswith("Topic") or line is None:
continue
line_clean=line.split()
word_list.extend(line_clean)
word_dictionary=dictionary_found(word_list)
frequency=look_into_dic(word_dictionary,testset)
perplexity=f_perplexity(frequency,testset_word_count)
perplexity_list.append(perplexity)
graph_draw(topic,perplexity_list)
f.close()
f1.close()
| 31.692308
| 109
| 0.643551
|
f101f2a323003e61bc00bd65305f7602125aa280
| 4,574
|
py
|
Python
|
benchmark/startQiskit_Class1687.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_Class1687.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_Class1687.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=5
# total number=59
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with lambda = pi (i.e. 180 degrees) ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
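# Illustrative note (not part of the generated file): for n = 2 and
# f = lambda rep: str(int(rep == "11")), build_oracle marks only |11> with a
# phase flip, i.e. it reduces to a plain CZ between the two control qubits.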
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
prog.h(input_qubit[0]) # number=43
prog.cz(input_qubit[4],input_qubit[0]) # number=44
prog.h(input_qubit[0]) # number=45
prog.h(input_qubit[0]) # number=56
prog.cz(input_qubit[4],input_qubit[0]) # number=57
prog.h(input_qubit[0]) # number=58
prog.z(input_qubit[4]) # number=47
prog.cx(input_qubit[4],input_qubit[0]) # number=48
prog.h(input_qubit[0]) # number=37
prog.cz(input_qubit[4],input_qubit[0]) # number=38
prog.h(input_qubit[0]) # number=39
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.rx(-1.0430087609918113,input_qubit[4]) # number=36
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=40
prog.cx(input_qubit[1],input_qubit[0]) # number=52
prog.x(input_qubit[0]) # number=53
prog.cx(input_qubit[1],input_qubit[0]) # number=54
prog.h(input_qubit[0]) # number=49
prog.cz(input_qubit[1],input_qubit[0]) # number=50
prog.h(input_qubit[0]) # number=51
prog.x(input_qubit[1]) # number=10
prog.rx(-0.06597344572538572,input_qubit[3]) # number=27
prog.cx(input_qubit[0],input_qubit[2]) # number=22
prog.x(input_qubit[2]) # number=23
prog.h(input_qubit[2]) # number=28
prog.cz(input_qubit[0],input_qubit[2]) # number=29
prog.h(input_qubit[2]) # number=30
prog.x(input_qubit[3]) # number=12
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[4]) # number=35
prog.h(input_qubit[0]) # number=17
prog.rx(2.4912829742967055,input_qubit[2]) # number=26
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[2]) # number=55
prog.h(input_qubit[2]) # number=25
prog.h(input_qubit[3]) # number=20
# circuit end
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =7924
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class1687.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 32.439716
| 80
| 0.612593
|
f66f592a57dadacb3d195ad26bab6c81d331895c
| 15,752
|
py
|
Python
|
zoo/policies/cross-rl-agent/cross_rl_agent/train/run_train.py
|
MCZhi/SMARTS
|
3ef5650b04ac6fb7145cf4e23d5534d73e0929fc
|
[
"MIT"
] | 2
|
2021-12-13T12:41:54.000Z
|
2021-12-16T03:10:24.000Z
|
zoo/policies/cross-rl-agent/cross_rl_agent/train/run_train.py
|
MCZhi/SMARTS
|
3ef5650b04ac6fb7145cf4e23d5534d73e0929fc
|
[
"MIT"
] | null | null | null |
zoo/policies/cross-rl-agent/cross_rl_agent/train/run_train.py
|
MCZhi/SMARTS
|
3ef5650b04ac6fb7145cf4e23d5534d73e0929fc
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# The author of this file is: https://github.com/mg2015started
# The following was modified from examples/ray_multi_instance.py
import argparse
import logging
import os
import pickle
import warnings
import gym
import numpy as np
import tensorflow as tf
from ac_network import ActorNetwork, CriticNetwork
from adapters import (
action_adapter,
cross_interface,
get_aux_info,
observation_adapter,
reward_adapter,
)
from config import HyperParameters
from prioritized_replay import Buffer
from soc_mt_ac_network import SocMtActorNetwork, SocMtCriticNetwork
from smarts.core.utils.episodes import episodes
from smarts.zoo.agent_spec import AgentSpec
from utils import get_split_batch
warnings.filterwarnings("ignore")
logging.basicConfig(level=logging.INFO)
AGENT_ID = "Agent-007"
def init_tensorflow():
configProto = tf.compat.v1.ConfigProto()
configProto.gpu_options.allow_growth = True
# reset tensorflow graph
tf.compat.v1.reset_default_graph()
return configProto
def train(
training_scenarios,
sim_name,
headless,
num_episodes,
seed,
without_soc_mt,
session_dir,
):
WITH_SOC_MT = without_soc_mt
config = HyperParameters()
configProto = init_tensorflow()
# init env
agent_spec = AgentSpec(
        # you can customize AgentInterface to control what obs information you need and the action type
        interface=cross_interface,
        # agent_builder=actor,
        # you can customize your observation adapter, reward adapter, info adapter, action adapter and so on.
observation_adapter=observation_adapter,
reward_adapter=reward_adapter,
action_adapter=action_adapter,
)
env = gym.make(
"smarts.env:hiway-v0",
scenarios=training_scenarios,
agent_specs={AGENT_ID: agent_spec},
sim_name=sim_name,
headless=headless,
timestep_sec=0.1,
seed=seed,
)
# init nets structure
if WITH_SOC_MT:
model_name = "Soc_Mt_TD3Network"
actor = SocMtActorNetwork(name="actor")
critic_1 = SocMtCriticNetwork(name="critic_1")
critic_2 = SocMtCriticNetwork(name="critic_2")
else:
model_name = "TD3Network"
actor = ActorNetwork(name="actor")
critic_1 = CriticNetwork(name="critic_1")
critic_2 = CriticNetwork(name="critic_2")
# tensorflow summary for tensorboard visualization
writer = tf.compat.v1.summary.FileWriter("summary")
# losses
tf.compat.v1.summary.scalar("Loss", critic_1.loss)
tf.compat.v1.summary.scalar("Hubor_loss", critic_1.loss_2)
tf.compat.v1.summary.histogram("ISWeights", critic_1.ISWeights)
write_op = tf.compat.v1.summary.merge_all()
saver = tf.compat.v1.train.Saver(max_to_keep=1000)
# init memory buffer
buffer = Buffer(config.buffer_size, config.pretrain_length)
if config.load_buffer: # !!!the capacity of the buffer is limited with buffer file
buffer = buffer.load_buffer(config.buffer_load_path)
print("BUFFER: Buffer Loaded")
else:
buffer.fill_buffer(env, AGENT_ID)
print("BUFFER: Buffer Filled")
buffer.save_buffer(config.buffer_save_path, buffer)
print("BUFFER: Buffer initialize")
with tf.compat.v1.Session(config=configProto) as sess:
# init nets params
sess.run(tf.compat.v1.global_variables_initializer())
writer.add_graph(sess.graph)
# update params of the target network
actor.update_target(sess)
critic_1.update_target(sess)
critic_2.update_target(sess)
# Reinforcement Learning loop
print("Training Starts...")
# experiment results
recent_rewards = [] # rewards from recent 100 episodes
        avarage_rewards = []  # average reward of the most recent 100 episodes
recent_success = []
recent_success_rate = []
EPSILON = 1
for episode in episodes(n=num_episodes):
env_steps = 0
# save the model from time to time
if config.model_save_frequency:
if episode.index % config.model_save_frequency == 0:
save_path = saver.save(sess, f"{session_dir}/{model_name}.ckpt")
print("latest model saved")
if episode.index % config.model_save_frequency_no_paste == 0:
saver.save(
sess,
f"{session_dir}/{model_name}_{str(episode.index)}.ckpt",
)
print("model saved")
# initialize
EPSILON = (config.noised_episodes - episode.index) / config.noised_episodes
episode_reward = 0
observations = env.reset() # states of all vehs
state = observations[AGENT_ID] # ego state
episode.record_scenario(env.scenario_log)
dones = {"__all__": False}
while not dones["__all__"]:
action_noise = actor.get_action_noise(sess, state, rate=EPSILON)
observations, rewards, dones, infos = env.step(
{AGENT_ID: action_noise}
) # states of all vehs in next step
# ego state in next step
next_state = observations[AGENT_ID]
if WITH_SOC_MT:
reward = rewards[AGENT_ID]
else:
                    reward = np.sum(list(rewards.values()))
done = dones[AGENT_ID]
info = infos[AGENT_ID]
aux_info = get_aux_info(infos[AGENT_ID]["env_obs"])
episode.record_step(observations, rewards, dones, infos)
if WITH_SOC_MT:
episode_reward += np.sum(reward)
else:
episode_reward += reward
# store the experience
experience = state, action_noise, reward, next_state, done
# print(state)
buffer.store(experience)
## Model training STARTS
if env_steps % config.train_frequency == 0:
# "Delayed" Policy Updates
policy_delayed = 2
for _ in range(policy_delayed):
# First we need a mini-batch with experiences (s, a, r, s', done)
tree_idx, batch, ISWeights_mb = buffer.sample(config.batch_size)
s_mb, a_mb, r_mb, next_s_mb, dones_mb = get_split_batch(batch)
task_mb = s_mb[:, -config.task_size :]
next_task_mb = next_s_mb[:, -config.task_size :]
# Get q_target values for next_state from the critic_target
if WITH_SOC_MT:
a_target_next_state = actor.get_action_target(
sess, next_s_mb
) # with Target Policy Smoothing
q_target_next_state_1 = critic_1.get_q_value_target(
sess, next_s_mb, a_target_next_state
)
q_target_next_state_1 = (
q_target_next_state_1 * next_task_mb
) # multi task q value
q_target_next_state_2 = critic_2.get_q_value_target(
sess, next_s_mb, a_target_next_state
)
q_target_next_state_2 = (
q_target_next_state_2 * next_task_mb
) # multi task q value
q_target_next_state = np.minimum(
q_target_next_state_1, q_target_next_state_2
)
else:
a_target_next_state = actor.get_action_target(
sess, next_s_mb
) # with Target Policy Smoothing
q_target_next_state_1 = critic_1.get_q_value_target(
sess, next_s_mb, a_target_next_state
)
q_target_next_state_2 = critic_2.get_q_value_target(
sess, next_s_mb, a_target_next_state
)
q_target_next_state = np.minimum(
q_target_next_state_1, q_target_next_state_2
)
# Set Q_target = r if the episode ends at s+1, otherwise Q_target = r + gamma * Qtarget(s',a')
target_Qs_batch = []
for i in range(0, len(dones_mb)):
terminal = dones_mb[i]
                            # if we are in a terminal state, the target only equals the reward
if terminal:
target_Qs_batch.append((r_mb[i] * task_mb[i]))
else:
                                # take the Q target for action a'
target = (
r_mb[i] * task_mb[i]
+ config.gamma * q_target_next_state[i]
)
target_Qs_batch.append(target)
targets_mb = np.array([each for each in target_Qs_batch])
# critic train
if len(a_mb.shape) > 2:
a_mb = np.squeeze(a_mb, axis=1)
loss, absolute_errors = critic_1.train(
sess, s_mb, a_mb, targets_mb, ISWeights_mb
)
loss_2, absolute_errors_2 = critic_2.train(
sess, s_mb, a_mb, targets_mb, ISWeights_mb
)
# actor train
a_for_grad = actor.get_action(sess, s_mb)
a_gradients = critic_1.get_gradients(sess, s_mb, a_for_grad)
# print(a_gradients)
actor.train(sess, s_mb, a_gradients[0])
# target train
actor.update_target(sess)
critic_1.update_target(sess)
critic_2.update_target(sess)
# update replay memory priorities
if WITH_SOC_MT:
absolute_errors = np.sum(absolute_errors, axis=1)
buffer.batch_update(tree_idx, absolute_errors)
## Model training ENDS
if done:
# visualize reward data
recent_rewards.append(episode_reward)
if len(recent_rewards) > 100:
recent_rewards.pop(0)
avarage_rewards.append(np.mean(recent_rewards))
avarage_rewards_data = np.array(avarage_rewards)
d = {"avarage_rewards": avarage_rewards_data}
with open(
os.path.join("results", "reward_data" + ".pkl"), "wb"
) as f:
pickle.dump(d, f, pickle.HIGHEST_PROTOCOL)
# visualize success rate data
if aux_info == "success":
recent_success.append(1)
else:
recent_success.append(0)
if len(recent_success) > 100:
recent_success.pop(0)
avarage_success_rate = recent_success.count(1) / len(recent_success)
recent_success_rate.append(avarage_success_rate)
recent_success_rate_data = np.array(recent_success_rate)
d = {"recent_success_rates": recent_success_rate_data}
with open(
os.path.join("results", "success_rate_data" + ".pkl"), "wb"
) as f:
pickle.dump(d, f, pickle.HIGHEST_PROTOCOL)
# print results on the terminal
print("Episode total reward:", episode_reward)
print("Episode time:", env_steps * 0.1)
print("Success rate:", avarage_success_rate)
print(episode.index, "episode finished.")
buffer.measure_utilization()
print("---" * 15)
break
else:
state = next_state
env_steps += 1
env.close()
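# Hedged sketch (not part of the original file): the clipped double-Q target
# computed inside the training loop above, written out for a single transition.
# The names below (r, task_mask, done, gamma, q1_next, q2_next) are illustrative
# stand-ins for the batched tensors used above.
#
#   q_next = np.minimum(q1_next, q2_next)      # TD3 clipped double Q
#   target = r * task_mask + (0.0 if done else gamma * q_next)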
def default_argument_parser(program: str):
"""This factory method returns a vanilla `argparse.ArgumentParser` with the
minimum subset of arguments that should be supported.
You can extend it with more `parser.add_argument(...)` calls or obtain the
arguments via `parser.parse_args()`.
"""
parser = argparse.ArgumentParser(program)
parser.add_argument(
"scenarios",
help="A list of scenarios. Each element can be either the scenario to run "
"(see scenarios/ for some samples you can use) OR a directory of scenarios "
"to sample from.",
type=str,
nargs="+",
)
parser.add_argument(
"--sim-name",
help="a string that gives this simulation a name.",
type=str,
default=None,
)
parser.add_argument(
"--headless", help="Run the simulation in headless mode.", action="store_true"
)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument(
"--sumo-port", help="Run SUMO with a specified port.", type=int, default=None
)
parser.add_argument(
"--episodes",
help="The number of episodes to run the simulation for.",
type=int,
default=5000,
)
return parser
if __name__ == "__main__":
parser = default_argument_parser("pytorch-example")
parser.add_argument(
"--without-soc-mt", help="Enable social mt.", action="store_true"
)
parser.add_argument(
"--session-dir",
help="The save directory for the model.",
type=str,
default="model/",
)
args = parser.parse_args()
train(
training_scenarios=args.scenarios,
sim_name=args.sim_name,
headless=args.headless,
num_episodes=args.episodes,
seed=args.seed,
without_soc_mt=args.without_soc_mt,
session_dir=args.session_dir,
)
| 41.343832
| 118
| 0.562913
|
6f5397f4f180d682238e4f61981a809b3ef2aabd
| 2,596
|
py
|
Python
|
tartiflette/scalar/builtins/string.py
|
matt-koevort/tartiflette
|
5777866b133d846ce4f8aa03f735fa81832896cd
|
[
"MIT"
] | 530
|
2019-06-04T11:45:36.000Z
|
2022-03-31T09:29:56.000Z
|
tartiflette/scalar/builtins/string.py
|
matt-koevort/tartiflette
|
5777866b133d846ce4f8aa03f735fa81832896cd
|
[
"MIT"
] | 242
|
2019-06-04T11:53:08.000Z
|
2022-03-28T07:06:27.000Z
|
tartiflette/scalar/builtins/string.py
|
matt-koevort/tartiflette
|
5777866b133d846ce4f8aa03f735fa81832896cd
|
[
"MIT"
] | 36
|
2019-06-21T06:40:27.000Z
|
2021-11-04T13:11:16.000Z
|
from typing import Any, Dict, Optional, Union
from tartiflette import Scalar
from tartiflette.constants import UNDEFINED_VALUE
from tartiflette.language.ast import StringValueNode
class ScalarString:
"""
Built-in scalar which handle string values.
"""
def coerce_output(self, value: Any) -> str:
"""
Coerce the resolved value for output.
:param value: value to coerce
:type value: Any
:return: the coerced value
:rtype: str
"""
# pylint: disable=no-self-use
if isinstance(value, str):
return value
if isinstance(value, bool):
return "true" if value else "false"
try:
# TODO: maybe we shouldn't accepts None, list, dict, exceptions...
return str(value)
except Exception: # pylint: disable=broad-except
pass
raise TypeError(f"String cannot represent value: < {value} >.")
def coerce_input(self, value: Any) -> str:
"""
Coerce the user input from variable value.
:param value: value to coerce
:type value: Any
:return: the coerced value
:rtype: str
"""
# pylint: disable=no-self-use
if not isinstance(value, str):
raise TypeError(
f"String cannot represent a non string value: < {value} >."
)
return value
def parse_literal(self, ast: "Node") -> Union[str, "UNDEFINED_VALUE"]:
"""
Coerce the input value from an AST node.
:param ast: AST node to coerce
:type ast: Node
:return: the coerced value
:rtype: Union[str, UNDEFINED_VALUE]
"""
# pylint: disable=no-self-use
return (
ast.value if isinstance(ast, StringValueNode) else UNDEFINED_VALUE
)
def bake(schema_name: str, config: Optional[Dict[str, Any]] = None) -> str:
"""
Links the scalar to the appropriate schema and returns the SDL related
to the scalar.
:param schema_name: schema name to link with
:param config: configuration of the scalar
:type schema_name: str
:type config: Optional[Dict[str, Any]]
:return: the SDL related to the scalar
:rtype: str
"""
# pylint: disable=unused-argument
Scalar("String", schema_name=schema_name)(ScalarString())
return '''
"""The `String` scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text."""
scalar String
'''
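# Hedged usage sketch (not part of the original module): how the coercion
# methods above behave. The engine normally calls them internally; the direct
# calls below are for illustration only.
#
#   scalar = ScalarString()
#   scalar.coerce_output(True)     # -> "true"
#   scalar.coerce_output(42)       # -> "42"
#   scalar.coerce_input("hello")   # -> "hello"
#   scalar.coerce_input(42)        # raises TypeError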
| 32.049383
| 191
| 0.614407
|
8d55b2b7fa1c08a07eb1275c93af59256d27d79c
| 151
|
py
|
Python
|
config.py
|
siprikorea/aguri-bot
|
98c81ee865f7814be0574c69dedaae0373da7249
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
siprikorea/aguri-bot
|
98c81ee865f7814be0574c69dedaae0373da7249
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
siprikorea/aguri-bot
|
98c81ee865f7814be0574c69dedaae0373da7249
|
[
"Apache-2.0"
] | null | null | null |
class config(object):
bot_token = '190337691:AAEmrH-pVz_wImCfYpnFDRA4G12Jhx7X1Jk'
base_url = 'https://api.telegram.org/bot' + bot_token + '/'
| 30.2
| 63
| 0.715232
|
72f28c924cd6ef0ce99f261039db52a2bdade042
| 22,034
|
py
|
Python
|
nmt/model_helper.py
|
whiskyboy/CVAE_GNMT
|
12d01df4b36cb5c44eb719c79cae71d782e1aacd
|
[
"Apache-2.0"
] | null | null | null |
nmt/model_helper.py
|
whiskyboy/CVAE_GNMT
|
12d01df4b36cb5c44eb719c79cae71d782e1aacd
|
[
"Apache-2.0"
] | null | null | null |
nmt/model_helper.py
|
whiskyboy/CVAE_GNMT
|
12d01df4b36cb5c44eb719c79cae71d782e1aacd
|
[
"Apache-2.0"
] | null | null | null |
"""Utility functions for building models."""
from __future__ import print_function
import collections
import six
import os
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from .utils import iterator_utils
from .utils import misc_utils as utils
from .utils import vocab_utils
__all__ = [
"get_initializer", "get_device_str", "create_train_model",
"create_eval_model", "create_infer_model",
"create_emb_for_encoder_and_decoder", "create_rnn_cell", "gradient_clip",
"create_or_load_model", "load_model", "avg_checkpoints",
"compute_perplexity"
]
# If a vocab size is greater than this value, put the embedding on cpu instead
VOCAB_SIZE_THRESHOLD_CPU = 50000
def get_initializer(init_op, seed=None, init_weight=None):
"""Create an initializer. init_weight is only for uniform."""
if init_op == "uniform":
assert init_weight
return tf.random_uniform_initializer(
-init_weight, init_weight, seed=seed)
elif init_op == "glorot_normal":
return tf.keras.initializers.glorot_normal(
seed=seed)
elif init_op == "glorot_uniform":
return tf.keras.initializers.glorot_uniform(
seed=seed)
else:
raise ValueError("Unknown init_op %s" % init_op)
def get_device_str(device_id, num_gpus):
"""Return a device string for multi-GPU setup."""
if num_gpus == 0:
return "/cpu:0"
device_str_output = "/gpu:%d" % (device_id % num_gpus)
return device_str_output
class ExtraArgs(collections.namedtuple(
"ExtraArgs", ("single_cell_fn", "model_device_fn",
"attention_mechanism_fn"))):
pass
class TrainModel(
collections.namedtuple("TrainModel", ("graph", "model", "iterator",
"skip_count_placeholder"))):
pass
def create_train_model(
model_creator, hparams, scope=None, num_workers=1, jobid=0,
extra_args=None):
"""Create train graph, model, and iterator."""
src_file = "%s.%s" % (hparams.train_prefix, hparams.src)
tgt_file = "%s.%s" % (hparams.train_prefix, hparams.tgt)
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
graph = tf.Graph()
with graph.as_default(), tf.container(scope or "train"):
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file, tgt_vocab_file, hparams.share_vocab)
src_dataset = tf.data.TextLineDataset(src_file)
tgt_dataset = tf.data.TextLineDataset(tgt_file)
skip_count_placeholder = tf.placeholder(shape=(), dtype=tf.int64)
iterator = iterator_utils.get_iterator(
src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
batch_size=hparams.batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=hparams.src_max_len,
tgt_max_len=hparams.tgt_max_len,
skip_count=skip_count_placeholder,
num_shards=num_workers,
shard_index=jobid)
# Note: One can set model_device_fn to
# `tf.train.replica_device_setter(ps_tasks)` for distributed training.
model_device_fn = None
if extra_args: model_device_fn = extra_args.model_device_fn
with tf.device(model_device_fn):
model = model_creator(
hparams,
iterator=iterator,
mode=tf.contrib.learn.ModeKeys.TRAIN,
source_vocab_table=src_vocab_table,
target_vocab_table=tgt_vocab_table,
scope=scope,
extra_args=extra_args)
return TrainModel(
graph=graph,
model=model,
iterator=iterator,
skip_count_placeholder=skip_count_placeholder)
class EvalModel(
collections.namedtuple("EvalModel",
("graph", "model", "src_file_placeholder",
"tgt_file_placeholder", "iterator"))):
pass
def create_eval_model(model_creator, hparams, scope=None, extra_args=None):
"""Create train graph, model, src/tgt file holders, and iterator."""
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
graph = tf.Graph()
with graph.as_default(), tf.container(scope or "eval"):
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file, tgt_vocab_file, hparams.share_vocab)
src_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
tgt_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
src_dataset = tf.data.TextLineDataset(src_file_placeholder)
tgt_dataset = tf.data.TextLineDataset(tgt_file_placeholder)
iterator = iterator_utils.get_iterator(
src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
hparams.batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=hparams.src_max_len_infer,
tgt_max_len=hparams.tgt_max_len_infer)
model = model_creator(
hparams,
iterator=iterator,
mode=tf.contrib.learn.ModeKeys.EVAL,
source_vocab_table=src_vocab_table,
target_vocab_table=tgt_vocab_table,
scope=scope,
extra_args=extra_args)
return EvalModel(
graph=graph,
model=model,
src_file_placeholder=src_file_placeholder,
tgt_file_placeholder=tgt_file_placeholder,
iterator=iterator)
class InferModel(
collections.namedtuple("InferModel",
("graph", "model", "src_placeholder",
"batch_size_placeholder", "iterator"))):
pass
def create_infer_model(model_creator, hparams, scope=None, extra_args=None):
"""Create inference model."""
graph = tf.Graph()
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
with graph.as_default(), tf.container(scope or "infer"):
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file, tgt_vocab_file, hparams.share_vocab)
reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
tgt_vocab_file, default_value=vocab_utils.UNK)
src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)
src_dataset = tf.data.Dataset.from_tensor_slices(
src_placeholder)
iterator = iterator_utils.get_infer_iterator(
src_dataset,
src_vocab_table,
batch_size=batch_size_placeholder,
eos=hparams.eos,
src_max_len=hparams.src_max_len_infer)
model = model_creator(
hparams,
iterator=iterator,
mode=tf.contrib.learn.ModeKeys.INFER,
source_vocab_table=src_vocab_table,
target_vocab_table=tgt_vocab_table,
reverse_target_vocab_table=reverse_tgt_vocab_table,
scope=scope,
extra_args=extra_args)
return InferModel(
graph=graph,
model=model,
src_placeholder=src_placeholder,
batch_size_placeholder=batch_size_placeholder,
iterator=iterator)
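# Editor's illustrative sketch (not part of the original file): how the
# placeholders returned by create_infer_model are typically fed before
# decoding. It assumes the infer iterator exposes an `initializer` op, as in
# the reference NMT code; `sess` and `sentences` are supplied by the caller.
def _example_initialize_infer_iterator(infer_model, sess, sentences,
                                       batch_size=32):
  sess.run(
      infer_model.iterator.initializer,
      feed_dict={
          infer_model.src_placeholder: sentences,
          infer_model.batch_size_placeholder: batch_size,
      })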
def _get_embed_device(vocab_size):
"""Decide on which device to place an embed matrix given its vocab size."""
if vocab_size > VOCAB_SIZE_THRESHOLD_CPU:
return "/cpu:0"
else:
return "/gpu:0"
def _create_pretrained_emb_from_txt(
vocab_file, embed_file, num_trainable_tokens=3, dtype=tf.float32,
scope=None):
"""Load pretrain embeding from embed_file, and return an embedding matrix.
Args:
embed_file: Path to a Glove formated embedding txt file.
num_trainable_tokens: Make the first n tokens in the vocab file as trainable
variables. Default is 3, which is "<unk>", "<s>" and "</s>".
"""
vocab, _ = vocab_utils.load_vocab(vocab_file)
trainable_tokens = vocab[:num_trainable_tokens]
utils.print_out("# Using pretrained embedding: %s." % embed_file)
utils.print_out(" with trainable tokens: ")
emb_dict, emb_size = vocab_utils.load_embed_txt(embed_file)
for token in trainable_tokens:
utils.print_out(" %s" % token)
if token not in emb_dict:
emb_dict[token] = [0.0] * emb_size
emb_mat = np.array(
[emb_dict[token] for token in vocab], dtype=dtype.as_numpy_dtype())
emb_mat = tf.constant(emb_mat)
emb_mat_const = tf.slice(emb_mat, [num_trainable_tokens, 0], [-1, -1])
with tf.variable_scope(scope or "pretrain_embeddings", dtype=dtype) as scope:
with tf.device(_get_embed_device(num_trainable_tokens)):
emb_mat_var = tf.get_variable(
"emb_mat_var", [num_trainable_tokens, emb_size])
return tf.concat([emb_mat_var, emb_mat_const], 0)
def _create_or_load_embed(embed_name, vocab_file, embed_file,
vocab_size, embed_size, dtype):
"""Create a new or load an existing embedding matrix."""
if vocab_file and embed_file:
embedding = _create_pretrained_emb_from_txt(vocab_file, embed_file)
else:
with tf.device(_get_embed_device(vocab_size)):
embedding = tf.get_variable(
embed_name, [vocab_size, embed_size], dtype)
return embedding
def create_emb_for_encoder_and_decoder(share_vocab,
src_vocab_size,
tgt_vocab_size,
src_embed_size,
tgt_embed_size,
dtype=tf.float32,
num_partitions=0,
src_vocab_file=None,
tgt_vocab_file=None,
src_embed_file=None,
tgt_embed_file=None,
scope=None):
"""Create embedding matrix for both encoder and decoder.
Args:
share_vocab: A boolean. Whether to share embedding matrix for both
encoder and decoder.
src_vocab_size: An integer. The source vocab size.
tgt_vocab_size: An integer. The target vocab size.
src_embed_size: An integer. The embedding dimension for the encoder's
embedding.
tgt_embed_size: An integer. The embedding dimension for the decoder's
embedding.
dtype: dtype of the embedding matrix. Default to float32.
num_partitions: number of partitions used for the embedding vars.
scope: VariableScope for the created subgraph. Default to "embedding".
Returns:
embedding_encoder: Encoder's embedding matrix.
embedding_decoder: Decoder's embedding matrix.
Raises:
ValueError: if use share_vocab but source and target have different vocab
size.
"""
if num_partitions <= 1:
partitioner = None
else:
# Note: num_partitions > 1 is required for distributed training because
# embedding_lookup tries to colocate a single-partition embedding variable
# with its lookup ops, which may cause embedding variables to be placed on
# worker jobs.
partitioner = tf.fixed_size_partitioner(num_partitions)
if (src_embed_file or tgt_embed_file) and partitioner:
raise ValueError(
"Can't set num_partitions > 1 when using pretrained embedding")
with tf.variable_scope(
scope or "embeddings", dtype=dtype, partitioner=partitioner) as scope:
# Share embedding
if share_vocab:
if src_vocab_size != tgt_vocab_size:
raise ValueError("Share embedding but different src/tgt vocab sizes"
" %d vs. %d" % (src_vocab_size, tgt_vocab_size))
assert src_embed_size == tgt_embed_size
utils.print_out("# Use the same embedding for source and target")
vocab_file = src_vocab_file or tgt_vocab_file
embed_file = src_embed_file or tgt_embed_file
embedding_encoder = _create_or_load_embed(
"embedding_share", vocab_file, embed_file,
src_vocab_size, src_embed_size, dtype)
embedding_decoder = embedding_encoder
else:
with tf.variable_scope("encoder", partitioner=partitioner):
embedding_encoder = _create_or_load_embed(
"embedding_encoder", src_vocab_file, src_embed_file,
src_vocab_size, src_embed_size, dtype)
with tf.variable_scope("decoder", partitioner=partitioner):
embedding_decoder = _create_or_load_embed(
"embedding_decoder", tgt_vocab_file, tgt_embed_file,
tgt_vocab_size, tgt_embed_size, dtype)
return embedding_encoder, embedding_decoder
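# Editor's illustrative sketch (not part of the original file): building a
# shared encoder/decoder embedding. The vocabulary and embedding sizes are
# assumptions chosen purely for illustration.
def _example_shared_embeddings():
  return create_emb_for_encoder_and_decoder(
      share_vocab=True,
      src_vocab_size=32000,
      tgt_vocab_size=32000,
      src_embed_size=512,
      tgt_embed_size=512)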
def _single_cell(unit_type, num_units, forget_bias, dropout, mode,
residual_connection=False, device_str=None, residual_fn=None):
"""Create an instance of a single RNN cell."""
# dropout (= 1 - keep_prob) is set to 0 during eval and infer
dropout = dropout if mode == tf.contrib.learn.ModeKeys.TRAIN else 0.0
# Cell Type
if unit_type == "lstm":
utils.print_out(" LSTM, forget_bias=%g" % forget_bias, new_line=False)
single_cell = tf.contrib.rnn.BasicLSTMCell(
num_units,
forget_bias=forget_bias)
elif unit_type == "gru":
utils.print_out(" GRU", new_line=False)
single_cell = tf.contrib.rnn.GRUCell(num_units)
elif unit_type == "layer_norm_lstm":
utils.print_out(" Layer Normalized LSTM, forget_bias=%g" % forget_bias,
new_line=False)
single_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
num_units,
forget_bias=forget_bias,
layer_norm=True)
elif unit_type == "nas":
utils.print_out(" NASCell", new_line=False)
single_cell = tf.contrib.rnn.NASCell(num_units)
else:
raise ValueError("Unknown unit type %s!" % unit_type)
# Dropout (= 1 - keep_prob)
if dropout > 0.0:
single_cell = tf.contrib.rnn.DropoutWrapper(
cell=single_cell, input_keep_prob=(1.0 - dropout))
utils.print_out(" %s, dropout=%g " %(type(single_cell).__name__, dropout),
new_line=False)
# Residual
if residual_connection:
single_cell = tf.contrib.rnn.ResidualWrapper(
single_cell, residual_fn=residual_fn)
utils.print_out(" %s" % type(single_cell).__name__, new_line=False)
# Device Wrapper
if device_str:
single_cell = tf.contrib.rnn.DeviceWrapper(single_cell, device_str)
utils.print_out(" %s, device=%s" %
(type(single_cell).__name__, device_str), new_line=False)
return single_cell
def _cell_list(unit_type, num_units, num_layers, num_residual_layers,
forget_bias, dropout, mode, num_gpus, base_gpu=0,
single_cell_fn=None, residual_fn=None):
"""Create a list of RNN cells."""
if not single_cell_fn:
single_cell_fn = _single_cell
# Multi-GPU
cell_list = []
for i in range(num_layers):
utils.print_out(" cell %d" % i, new_line=False)
single_cell = single_cell_fn(
unit_type=unit_type,
num_units=num_units,
forget_bias=forget_bias,
dropout=dropout,
mode=mode,
residual_connection=(i >= num_layers - num_residual_layers),
device_str=get_device_str(i + base_gpu, num_gpus),
residual_fn=residual_fn
)
utils.print_out("")
cell_list.append(single_cell)
return cell_list
def create_rnn_cell(unit_type, num_units, num_layers, num_residual_layers,
forget_bias, dropout, mode, num_gpus, base_gpu=0,
single_cell_fn=None):
"""Create multi-layer RNN cell.
Args:
unit_type: string representing the unit type, i.e. "lstm".
num_units: the depth of each unit.
num_layers: number of cells.
num_residual_layers: Number of residual layers from top to bottom. For
example, if `num_layers=4` and `num_residual_layers=2`, the last 2 RNN
cells in the returned list will be wrapped with `ResidualWrapper`.
forget_bias: the initial forget bias of the RNNCell(s).
dropout: floating point value between 0.0 and 1.0:
the probability of dropout. This is ignored if `mode != TRAIN`.
mode: either tf.contrib.learn.TRAIN/EVAL/INFER
num_gpus: The number of gpus to use when performing round-robin
placement of layers.
base_gpu: The gpu device id to use for the first RNN cell in the
returned list. The i-th RNN cell will use `(base_gpu + i) % num_gpus`
as its device id.
single_cell_fn: allow for adding customized cell.
When not specified, we default to model_helper._single_cell
Returns:
An `RNNCell` instance.
"""
cell_list = _cell_list(unit_type=unit_type,
num_units=num_units,
num_layers=num_layers,
num_residual_layers=num_residual_layers,
forget_bias=forget_bias,
dropout=dropout,
mode=mode,
num_gpus=num_gpus,
base_gpu=base_gpu,
single_cell_fn=single_cell_fn)
if len(cell_list) == 1: # Single layer.
return cell_list[0]
else: # Multi layers
return tf.contrib.rnn.MultiRNNCell(cell_list)
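# Editor's illustrative sketch (not part of the original file): a two-layer
# LSTM cell with one residual layer on a single GPU. The hyperparameter values
# are assumptions for illustration only.
def _example_encoder_cell():
  return create_rnn_cell(
      unit_type="lstm",
      num_units=128,
      num_layers=2,
      num_residual_layers=1,
      forget_bias=1.0,
      dropout=0.2,
      mode=tf.contrib.learn.ModeKeys.TRAIN,
      num_gpus=1)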
def gradient_clip(gradients, max_gradient_norm):
"""Clipping gradients of a model."""
clipped_gradients, gradient_norm = tf.clip_by_global_norm(
gradients, max_gradient_norm)
gradient_norm_summary = [tf.summary.scalar("grad_norm", gradient_norm)]
gradient_norm_summary.append(
tf.summary.scalar("clipped_gradient", tf.global_norm(clipped_gradients)))
return clipped_gradients, gradient_norm_summary, gradient_norm
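# Editor's illustrative sketch (not part of the original file): wiring the
# clipped gradients into an optimizer, roughly as a training model would.
# `loss`, `params`, `learning_rate`, `max_gradient_norm` and `global_step`
# are assumed to be provided by the caller.
def _example_clip_and_apply(loss, params, learning_rate, max_gradient_norm,
                            global_step):
  opt = tf.train.GradientDescentOptimizer(learning_rate)
  gradients = tf.gradients(loss, params)
  clipped_grads, grad_norm_summary, grad_norm = gradient_clip(
      gradients, max_gradient_norm)
  update = opt.apply_gradients(
      zip(clipped_grads, params), global_step=global_step)
  return update, grad_norm_summary, grad_norm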
def load_model(model, ckpt, session, name):
start_time = time.time()
model.saver.restore(session, ckpt)
session.run(tf.tables_initializer())
utils.print_out(
" loaded %s model parameters from %s, time %.2fs" %
(name, ckpt, time.time() - start_time))
return model
def avg_checkpoints(model_dir, num_last_checkpoints, global_step,
global_step_name):
"""Average the last N checkpoints in the model_dir."""
checkpoint_state = tf.train.get_checkpoint_state(model_dir)
if not checkpoint_state:
utils.print_out("# No checkpoint file found in directory: %s" % model_dir)
return None
# Checkpoints are ordered from oldest to newest.
checkpoints = (
checkpoint_state.all_model_checkpoint_paths[-num_last_checkpoints:])
if len(checkpoints) < num_last_checkpoints:
utils.print_out(
"# Skipping averaging checkpoints because not enough checkpoints is "
"avaliable."
)
return None
avg_model_dir = os.path.join(model_dir, "avg_checkpoints")
if not tf.gfile.Exists(avg_model_dir):
utils.print_out(
"# Creating new directory %s for saving averaged checkpoints." %
avg_model_dir)
tf.gfile.MakeDirs(avg_model_dir)
utils.print_out("# Reading and averaging variables in checkpoints:")
var_list = tf.contrib.framework.list_variables(checkpoints[0])
var_values, var_dtypes = {}, {}
for (name, shape) in var_list:
if name != global_step_name:
var_values[name] = np.zeros(shape)
for checkpoint in checkpoints:
utils.print_out(" %s" % checkpoint)
reader = tf.contrib.framework.load_checkpoint(checkpoint)
for name in var_values:
tensor = reader.get_tensor(name)
var_dtypes[name] = tensor.dtype
var_values[name] += tensor
for name in var_values:
var_values[name] /= len(checkpoints)
# Build a graph with same variables in the checkpoints, and save the averaged
# variables into the avg_model_dir.
with tf.Graph().as_default():
tf_vars = [
tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v])
for v in var_values
]
placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]
assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]
global_step_var = tf.Variable(
global_step, name=global_step_name, trainable=False)
saver = tf.train.Saver(tf.all_variables())
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for p, assign_op, (name, value) in zip(placeholders, assign_ops,
six.iteritems(var_values)):
sess.run(assign_op, {p: value})
# Use the built saver to save the averaged checkpoint. Only keep 1
# checkpoint and the best checkpoint will be moved to avg_best_metric_dir.
saver.save(
sess,
os.path.join(avg_model_dir, "translate.ckpt"))
return avg_model_dir
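# Editor's illustrative sketch (not part of the original file): averaging the
# last five checkpoints in an output directory. The directory argument and the
# "global_step" variable name are assumptions for illustration.
def _example_average_last_checkpoints(out_dir, global_step):
  return avg_checkpoints(out_dir, 5, global_step, "global_step")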
def create_or_load_model(model, model_dir, session, name):
"""Create translation model and initialize or load parameters in session."""
latest_ckpt = tf.train.latest_checkpoint(model_dir)
if latest_ckpt:
model = load_model(model, latest_ckpt, session, name)
else:
start_time = time.time()
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
utils.print_out(" created %s model with fresh parameters, time %.2fs" %
(name, time.time() - start_time))
global_step = model.global_step.eval(session=session)
return model, global_step
def compute_perplexity(model, sess, name):
"""Compute perplexity of the output of the model.
Args:
model: model for computing perplexity.
sess: tensorflow session to use.
name: name of the batch.
Returns:
The perplexity of the eval outputs.
"""
total_loss = 0
total_predict_count = 0
start_time = time.time()
while True:
try:
step_result = model.eval(sess)
if len(step_result) == 3:
loss, predict_count, batch_size = step_result
bow_loss = kl_loss = 0
elif len(step_result) == 5:
loss, predict_count, batch_size, bow_loss, kl_loss = step_result
total_loss += (loss - bow_loss - kl_loss) * batch_size
total_predict_count += predict_count
except tf.errors.OutOfRangeError:
break
perplexity = utils.safe_exp(total_loss / total_predict_count)
utils.print_time(" eval %s: perplexity %.2f" % (name, perplexity),
start_time)
return perplexity
| 36.003268
| 80
| 0.6824
|
5f28b4d5282c2563c296dfecbcbe72224ee8fce1
| 400
|
py
|
Python
|
src/network_manager_dispatcher_app/sound_thread.py
|
meads/dns-leak-test-py
|
2771c56c486ab549519d886e691993a6b090f27c
|
[
"MIT"
] | 1
|
2022-02-19T09:09:52.000Z
|
2022-02-19T09:09:52.000Z
|
src/network_manager_dispatcher_app/sound_thread.py
|
meads/network-manager-dispatcher-app
|
2771c56c486ab549519d886e691993a6b090f27c
|
[
"MIT"
] | null | null | null |
src/network_manager_dispatcher_app/sound_thread.py
|
meads/network-manager-dispatcher-app
|
2771c56c486ab549519d886e691993a6b090f27c
|
[
"MIT"
] | null | null | null |
import threading
from queue import Queue
from .sox_player import ISoXPlayer
class SoundThread(threading.Thread):
def __init__(self, q: Queue, play: ISoXPlayer):
threading.Thread.__init__(self)
self.q = q
self.play = play
def run(self):
while True:
if self.q.empty():
self.play.dialer()
continue
return
| 23.529412
| 51
| 0.5875
|
ec211937be56a3d8aa96b4904a7e8fce678d5840
| 6,952
|
py
|
Python
|
test/functional/rpc_getblockstats.py
|
WFLSCoin/wflscoin
|
794eb115845c3e7d6b75cf40031568bf5329ee25
|
[
"MIT"
] | null | null | null |
test/functional/rpc_getblockstats.py
|
WFLSCoin/wflscoin
|
794eb115845c3e7d6b75cf40031568bf5329ee25
|
[
"MIT"
] | null | null | null |
test/functional/rpc_getblockstats.py
|
WFLSCoin/wflscoin
|
794eb115845c3e7d6b75cf40031568bf5329ee25
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Wflscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import WflscoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(WflscoinTestFramework):
start_height = 101
max_stat_pos = 2
def add_options(self, parser):
parser.add_argument('--gen-test-data', dest='gen_test_data',
default=False, action='store_true',
help='Generate test data')
parser.add_argument('--test-data', dest='test_data',
default='data/rpc_getblockstats.json',
action='store', metavar='FILE',
help='Test data file')
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.supports_cli = False
def get_stats(self):
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
def generate_test_data(self, filename):
mocktime = 1525107225
self.nodes[0].setmocktime(mocktime)
self.nodes[0].generate(COINBASE_MATURITY + 1)
address = self.nodes[0].get_deterministic_priv_key().address
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].generate(1)
self.sync_all()
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=False)
self.nodes[0].settxfee(amount=0.003)
self.nodes[0].sendtoaddress(address=address, amount=1, subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
self.expected_stats = self.get_stats()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
self.nodes[0].setmocktime(mocktime)
self.sync_all()
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
self.sync_all()
stats = self.get_stats()
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
# Make sure invalid stats aren't allowed
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
# Invalid number of args
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats, '00', 1, 2)
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats)
if __name__ == '__main__':
GetblockstatsTest().main()
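# Editor's note (not part of the original test): the JSON fixture can be
# regenerated by running this script directly with --gen-test-data; without
# that flag the saved data in data/rpc_getblockstats.json is loaded instead.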
| 41.380952
| 121
| 0.627014
|
b46b87e5c35cb0ed602cd3a787ce287c9838c13f
| 627
|
py
|
Python
|
api/app/utils.py
|
pedroporras/intro-deployment-ml
|
ff2768be17bc6ca3b3ab5ae5380c3b7516f9396d
|
[
"MIT"
] | null | null | null |
api/app/utils.py
|
pedroporras/intro-deployment-ml
|
ff2768be17bc6ca3b3ab5ae5380c3b7516f9396d
|
[
"MIT"
] | null | null | null |
api/app/utils.py
|
pedroporras/intro-deployment-ml
|
ff2768be17bc6ca3b3ab5ae5380c3b7516f9396d
|
[
"MIT"
] | null | null | null |
from joblib import load
from sklearn.pipeline import Pipeline
from pydantic import BaseModel
from pandas import DataFrame
import os
from io import BytesIO
def get_model() -> Pipeline:
model_path = os.environ.get('MODEL_PATH', 'model/model.pkl')
with open(model_path, 'rb') as model_file:
model = load(BytesIO(model_file.read()))
return model
def transform_to_dataframe(class_model: BaseModel) -> DataFrame:
transition_dictionary = {key: [value] for key, value in class_model.dict().items()}
data_frame = DataFrame(transition_dictionary)
return data_frame
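# Editor's illustrative sketch (not part of the original module): combining the
# two helpers. The payload fields below are hypothetical; the real request
# schema is defined by the API's pydantic models.
class _ExamplePayload(BaseModel):
    rooms: int
    area: float


def _example_predict():
    frame = transform_to_dataframe(_ExamplePayload(rooms=3, area=70.5))
    return get_model().predict(frame)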
| 31.35
| 87
| 0.751196
|
688f1bc7810191f836e6233b3209444bb9181847
| 4,401
|
py
|
Python
|
tests/test_cookiecutter_python_library.py
|
ghassanmas/edx-cookiecutters
|
b186021f7322d9c493f1875fff797ebae33157b0
|
[
"Apache-2.0"
] | 2
|
2022-02-21T15:21:40.000Z
|
2022-03-07T14:09:05.000Z
|
tests/test_cookiecutter_python_library.py
|
ghassanmas/edx-cookiecutters
|
b186021f7322d9c493f1875fff797ebae33157b0
|
[
"Apache-2.0"
] | 8
|
2021-12-27T02:19:03.000Z
|
2022-03-15T19:05:52.000Z
|
tests/test_cookiecutter_python_library.py
|
ghassanmas/edx-cookiecutters
|
b186021f7322d9c493f1875fff797ebae33157b0
|
[
"Apache-2.0"
] | 1
|
2022-03-02T16:48:26.000Z
|
2022-03-02T16:48:26.000Z
|
"""
Tests of the project generation output.
"""
import logging
import logging.config
import os
import re
from contextlib import contextmanager
from pathlib import Path
import pytest
import sh
from test_utils.bake import bake_in_temp_dir
from test_utils.venv import run_in_virtualenv
LOGGING_CONFIG = {
'version': 1,
'incremental': True,
'loggers': {
'binaryornot': {
'level': logging.INFO,
},
'sh': {
'level': logging.INFO,
}
}
}
logging.config.dictConfig(LOGGING_CONFIG)
common = {
"library_name": "cookie_lover",
"repo_name": "cookie_repo",
}
configurations = [
pytest.param(
{
**common,
},
)
]
@pytest.fixture(name='custom_template', scope="module")
def fixture_custom_template(cookies_session):
template = cookies_session._default_template + "/cookiecutter-python-library" # pylint: disable=protected-access
return template
@pytest.fixture(params=configurations, name='options_baked', scope="module")
def fixture_options_baked(cookies_session, request, custom_template):
"""
Bake a cookie cutter, parameterized by configurations.
Provides the configuration dict, and changes into the directory with the
baked result.
"""
with bake_in_temp_dir(cookies_session, extra_context=request.param, template=custom_template):
yield request.param
# Fixture names aren't always used in test functions. Disable completely.
# pylint: disable=unused-argument
@pytest.mark.parametrize("license_name, target_string", [
('AGPL 3.0', 'GNU AFFERO GENERAL PUBLIC LICENSE'),
('Apache Software License 2.0', 'Apache'),
])
def test_bake_selecting_license(cookies, license_name, target_string, custom_template):
"""Test to check if LICENSE.txt gets the correct license selected."""
with bake_in_temp_dir(cookies, extra_context={'open_source_license': license_name}, template=custom_template):
assert target_string in Path("LICENSE.txt").read_text()
assert license_name in Path("setup.py").read_text()
def test_readme(options_baked, custom_template):
"""The generated README.rst file should pass some sanity checks and validate as a PyPI long description."""
readme_file = Path('README.rst')
readme_lines = [x.strip() for x in readme_file.open()]
assert "cookie_repo" == readme_lines[0]
assert ':target: https://pypi.python.org/pypi/cookie_repo/' in readme_lines
for command in ("python -m build --wheel", "twine check dist/*"):
    # os.system does not raise an exception on failure; check the exit status.
    if os.system(command) != 0:
        pytest.fail("Command failed: " + command)
def test_github_actions_ci(options_baked):
"""The generated ci.yml file should pass a sanity check."""
ci_text = Path(".github/workflows/ci.yml").read_text()
assert 'pip install -r requirements/ci.txt' in ci_text
def test_manifest(options_baked):
"""The generated MANIFEST.in should pass a sanity check."""
manifest_text = Path("MANIFEST.in").read_text()
assert 'recursive-include cookie_lover *.html' in manifest_text
def test_setup_py(options_baked):
"""The generated setup.py should pass a sanity check."""
setup_text = Path("setup.py").read_text()
assert "VERSION = get_version('cookie_lover', '__init__.py')" in setup_text
assert " author='edX'," in setup_text
def test_upgrade(options_baked):
"""Make sure the upgrade target works"""
try:
run_in_virtualenv('make upgrade')
except sh.ErrorReturnCode as exc:
pytest.fail(str(exc.stderr))
def test_quality(options_baked):
"""Run quality tests on the given generated output."""
for dirpath, _dirnames, filenames in os.walk("."):
for filename in filenames:
name = os.path.join(dirpath, filename)
if not name.endswith('.py'):
continue
try:
sh.pylint(name)
sh.pycodestyle(name)
sh.pydocstyle(name)
sh.isort(name, check_only=True, diff=True)
except sh.ErrorReturnCode as exc:
pytest.fail(str(exc))
try:
# Sanity check the generated Makefile
sh.make('help')
# quality check docs
sh.doc8("README.rst", ignore_path="docs/_build")
sh.doc8("docs", ignore_path="docs/_build")
except sh.ErrorReturnCode as exc:
pytest.fail(str(exc))
| 30.992958
| 117
| 0.676437
|
0fcb3d3f23b1a2edefc6c91960598753390bf298
| 1,426
|
py
|
Python
|
dai15shou/code15-1_fushimi.py
|
naoshige314/workshop01
|
5c7be08f99eb164b7901628de26cecfd04fa926f
|
[
"MIT"
] | null | null | null |
dai15shou/code15-1_fushimi.py
|
naoshige314/workshop01
|
5c7be08f99eb164b7901628de26cecfd04fa926f
|
[
"MIT"
] | null | null | null |
dai15shou/code15-1_fushimi.py
|
naoshige314/workshop01
|
5c7be08f99eb164b7901628de26cecfd04fa926f
|
[
"MIT"
] | 2
|
2021-06-10T11:53:02.000Z
|
2021-06-20T15:43:39.000Z
|
# Union-Find implementation
class UnionFind:
def __init__(self,n):
self.par=[-1]*n
self.siz=[1]*n
# Find the root of x
def root(self,x):
if self.par[x] == -1:
return x
else:
# Path compression
self.par[x]=self.root(self.par[x])
return self.par[x]
# Same-group check (whether the roots match)
def issame(self,x,y):
return self.root(x) == self.root(y)
# Merge the groups containing x and y
def unite(self,x,y):
# Move x and y to their roots
x=self.root(x)
y=self.root(y)
if x==y:
return False
# Union by size (make y the smaller side)
if self.siz[x] < self.siz[y]:
tmp=y
y=x
x=tmp
self.par[y]=x
self.siz[x] += self.siz[y]
return True
# Size of the group containing x
def size(self,x):
return self.siz[self.root(x)]
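# Editor's illustrative check (not part of the original solution): minimal
# UnionFind usage demonstrating unite/issame.
_demo = UnionFind(3)
_demo.unite(0, 1)
assert _demo.issame(0, 1)
assert not _demo.issame(0, 2)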
# Each edge e=(u,v) is represented as [w(e), u, v]
# n, m = number of vertices, number of edges
n,m=8,12
edges=[]
edges.append([4,1,4])
edges.append([2,6,4])
edges.append([3,1,6])
edges.append([9,2,4])
edges.append([8,1,3])
edges.append([7,6,7])
edges.append([5,2,7])
edges.append([6,3,7])
edges.append([3,7,0])
edges.append([5,3,0])
edges.append([10,2,5])
edges.append([6,0,5])
# Sort edges in ascending order of weight
edges.sort()
res=0
uf=UnionFind(n)
for i in range(m):
[w,u,v]=edges[i]
#w,u,v=edges[i][0],edges[i][1],edges[i][2]
# Skip the edge if u and v are already in the same group
if uf.issame(u,v):
continue
# Add the edge
res += w
uf.unite(u,v)
print(res)
| 18.519481
| 46
| 0.514727
|
ca515f76e04d3277ba2a8619290ccd2130b3a2ff
| 44,054
|
py
|
Python
|
listings/listing11-2.py
|
eivl/MissionPython
|
dc4e06f2eaac9d53f5091ae6f921e39db986d101
|
[
"Apache-2.0"
] | 4
|
2018-09-07T15:35:24.000Z
|
2019-03-27T09:48:12.000Z
|
listings/listing11-2.py
|
eivl/MissionPython
|
dc4e06f2eaac9d53f5091ae6f921e39db986d101
|
[
"Apache-2.0"
] | 371
|
2020-03-04T21:51:56.000Z
|
2022-03-31T20:59:11.000Z
|
listings/listing11-2.py
|
eivl/MissionPython
|
dc4e06f2eaac9d53f5091ae6f921e39db986d101
|
[
"Apache-2.0"
] | 3
|
2019-06-18T19:57:17.000Z
|
2020-11-06T03:55:08.000Z
|
# Escape - A Python Adventure
# by Sean McManus / www.sean.co.uk
# Art by Rafael Pimenta
# Typed in by PUT YOUR NAME HERE
import time, random, math
###############
## VARIABLES ##
###############
WIDTH = 800 #window size
HEIGHT = 800
#PLAYER variables
PLAYER_NAME = "Sean" # change this to your name!
FRIEND1_NAME = "Karen" # change this to a friend's name!
FRIEND2_NAME = "Leo" # change this to another friend's name!
current_room = 31 # start room = 31
top_left_x = 100
top_left_y = 150
DEMO_OBJECTS = [images.floor, images.pillar, images.soil]
LANDER_SECTOR = random.randint(1, 24)
LANDER_X = random.randint(2, 11)
LANDER_Y = random.randint(2, 11)
TILE_SIZE = 30
player_y, player_x = 2, 5
game_over = False
PLAYER = {
"left": [images.spacesuit_left, images.spacesuit_left_1,
images.spacesuit_left_2, images.spacesuit_left_3,
images.spacesuit_left_4
],
"right": [images.spacesuit_right, images.spacesuit_right_1,
images.spacesuit_right_2, images.spacesuit_right_3,
images.spacesuit_right_4
],
"up": [images.spacesuit_back, images.spacesuit_back_1,
images.spacesuit_back_2, images.spacesuit_back_3,
images.spacesuit_back_4
],
"down": [images.spacesuit_front, images.spacesuit_front_1,
images.spacesuit_front_2, images.spacesuit_front_3,
images.spacesuit_front_4
]
}
player_direction = "down"
player_frame = 0
player_image = PLAYER[player_direction][player_frame]
player_offset_x, player_offset_y = 0, 0
PLAYER_SHADOW = {
"left": [images.spacesuit_left_shadow, images.spacesuit_left_1_shadow,
images.spacesuit_left_2_shadow, images.spacesuit_left_3_shadow,
images.spacesuit_left_3_shadow
],
"right": [images.spacesuit_right_shadow, images.spacesuit_right_1_shadow,
images.spacesuit_right_2_shadow,
images.spacesuit_right_3_shadow, images.spacesuit_right_3_shadow
],
"up": [images.spacesuit_back_shadow, images.spacesuit_back_1_shadow,
images.spacesuit_back_2_shadow, images.spacesuit_back_3_shadow,
images.spacesuit_back_3_shadow
],
"down": [images.spacesuit_front_shadow, images.spacesuit_front_1_shadow,
images.spacesuit_front_2_shadow, images.spacesuit_front_3_shadow,
images.spacesuit_front_3_shadow
]
}
player_image_shadow = PLAYER_SHADOW["down"][0]
PILLARS = [
images.pillar, images.pillar_95, images.pillar_80,
images.pillar_60, images.pillar_50
]
wall_transparency_frame = 0
BLACK = (0, 0, 0)
BLUE = (0, 155, 255)
YELLOW = (255, 255, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (128, 0, 0)
air, energy = 100, 100
suit_stitched, air_fixed = False, False
launch_frame = 0
###############
## MAP ##
###############
MAP_WIDTH = 5
MAP_HEIGHT = 10
MAP_SIZE = MAP_WIDTH * MAP_HEIGHT
GAME_MAP = [ ["Room 0 - where unused objects are kept", 0, 0, False, False] ]
outdoor_rooms = range(1, 26)
for planetsectors in range(1, 26): #rooms 1 to 25 are generated here
GAME_MAP.append( ["The dusty planet surface", 13, 13, True, True] )
GAME_MAP += [
#["Room name", height, width, Top exit?, Right exit?]
["The airlock", 13, 5, True, False], # room 26
["The engineering lab", 13, 13, False, False], # room 27
["Poodle Mission Control", 9, 13, False, True], # room 28
["The viewing gallery", 9, 15, False, False], # room 29
["The crew's bathroom", 5, 5, False, False], # room 30
["The airlock entry bay", 7, 11, True, True], # room 31
["Left elbow room", 9, 7, True, False], # room 32
["Right elbow room", 7, 13, True, True], # room 33
["The science lab", 13, 13, False, True], # room 34
["The greenhouse", 13, 13, True, False], # room 35
[PLAYER_NAME + "'s sleeping quarters", 9, 11, False, False], # room 36
["West corridor", 15, 5, True, True], # room 37
["The briefing room", 7, 13, False, True], # room 38
["The crew's community room", 11, 13, True, False], # room 39
["Main Mission Control", 14, 14, False, False], # room 40
["The sick bay", 12, 7, True, False], # room 41
["West corridor", 9, 7, True, False], # room 42
["Utilities control room", 9, 9, False, True], # room 43
["Systems engineering bay", 9, 11, False, False], # room 44
["Security portal to Mission Control", 7, 7, True, False], # room 45
[FRIEND1_NAME + "'s sleeping quarters", 9, 11, True, True], # room 46
[FRIEND2_NAME + "'s sleeping quarters", 9, 11, True, True], # room 47
["The pipeworks", 13, 11, True, False], # room 48
["The chief scientist's office", 9, 7, True, True], # room 49
["The robot workshop", 9, 11, True, False] # room 50
]
#simple sanity check on map above to check data entry
assert len(GAME_MAP)-1 == MAP_SIZE, "Map size and GAME_MAP don't match"
###############
## OBJECTS ##
###############
objects = {
0: [images.floor, None, "The floor is shiny and clean"],
1: [images.pillar, images.full_shadow, "The wall is smooth and cold"],
2: [images.soil, None, "It's like a desert. Or should that be dessert?"],
3: [images.pillar_low, images.half_shadow, "The wall is smooth and cold"],
4: [images.bed, images.half_shadow, "A tidy and comfortable bed"],
5: [images.table, images.half_shadow, "It's made from strong plastic."],
6: [images.chair_left, None, "A chair with a soft cushion"],
7: [images.chair_right, None, "A chair with a soft cushion"],
8: [images.bookcase_tall, images.full_shadow,
"Bookshelves, stacked with reference books"],
9: [images.bookcase_small, images.half_shadow,
"Bookshelves, stacked with reference books"],
10: [images.cabinet, images.half_shadow,
"A small locker, for storing personal items"],
11: [images.desk_computer, images.half_shadow,
"A computer. Use it to run life support diagnostics"],
12: [images.plant, images.plant_shadow, "A spaceberry plant, grown here"],
13: [images.electrical1, images.half_shadow,
"Electrical systems used for powering the space station"],
14: [images.electrical2, images.half_shadow,
"Electrical systems used for powering the space station"],
15: [images.cactus, images.cactus_shadow, "Ouch! Careful on the cactus!"],
16: [images.shrub, images.shrub_shadow,
"A space lettuce. A bit limp, but amazing it's growing here!"],
17: [images.pipes1, images.pipes1_shadow, "Water purification pipes"],
18: [images.pipes2, images.pipes2_shadow,
"Pipes for the life support systems"],
19: [images.pipes3, images.pipes3_shadow,
"Pipes for the life support systems"],
20: [images.door, images.door_shadow, "Safety door. Opens automatically \
for astronauts in functioning spacesuits."],
21: [images.door, images.door_shadow, "The airlock door. \
For safety reasons, it requires two person operation."],
22: [images.door, images.door_shadow, "A locked door. It needs " \
+ PLAYER_NAME + "'s access card"],
23: [images.door, images.door_shadow, "A locked door. It needs " \
+ FRIEND1_NAME + "'s access card"],
24: [images.door, images.door_shadow, "A locked door. It needs " \
+ FRIEND2_NAME + "'s access card"],
25: [images.door, images.door_shadow,
"A locked door. It is opened from Main Mission Control"],
26: [images.door, images.door_shadow,
"A locked door in the engineering bay."],
27: [images.map, images.full_shadow,
"The screen says the crash site was Sector: " \
+ str(LANDER_SECTOR) + " // X: " + str(LANDER_X) + \
" // Y: " + str(LANDER_Y)],
28: [images.rock_large, images.rock_large_shadow,
"A rock. Its coarse surface feels like a whetstone", "the rock"],
29: [images.rock_small, images.rock_small_shadow,
"A small but heavy piece of Martian rock"],
30: [images.crater, None, "A crater in the planet surface"],
31: [images.fence, None,
"A fine gauze fence. It helps protect the station from dust storms"],
32: [images.contraption, images.contraption_shadow,
"One of the scientific experiments. It gently vibrates"],
33: [images.robot_arm, images.robot_arm_shadow,
"A robot arm, used for heavy lifting"],
34: [images.toilet, images.half_shadow, "A sparkling clean toilet"],
35: [images.sink, None, "A sink with running water", "the taps"],
36: [images.globe, images.globe_shadow,
"A giant globe of the planet. It gently glows from inside"],
37: [images.science_lab_table, None,
"A table of experiments, analyzing the planet soil and dust"],
38: [images.vending_machine, images.full_shadow,
"A vending machine. It requires a credit.", "the vending machine"],
39: [images.floor_pad, None,
"A pressure sensor to make sure nobody goes out alone."],
40: [images.rescue_ship, images.rescue_ship_shadow, "A rescue ship!"],
41: [images.mission_control_desk, images.mission_control_desk_shadow, \
"Mission Control stations."],
42: [images.button, images.button_shadow,
"The button for opening the time-locked door in engineering."],
43: [images.whiteboard, images.full_shadow,
"The whiteboard is used in brainstorms and planning meetings."],
44: [images.window, images.full_shadow,
"The window provides a view out onto the planet surface."],
45: [images.robot, images.robot_shadow, "A cleaning robot, turned off."],
46: [images.robot2, images.robot2_shadow,
"A planet surface exploration robot, awaiting set-up."],
47: [images.rocket, images.rocket_shadow, "A one-person craft in repair"],
48: [images.toxic_floor, None, "Toxic floor - do not walk on!"],
49: [images.drone, None, "A delivery drone"],
50: [images.energy_ball, None, "An energy ball - dangerous!"],
51: [images.energy_ball2, None, "An energy ball - dangerous!"],
52: [images.computer, images.computer_shadow,
"A computer workstation, for managing space station systems."],
53: [images.clipboard, None,
"A clipboard. Someone has doodled on it.", "the clipboard"],
54: [images.bubble_gum, None,
"A piece of sticky bubble gum. Spaceberry flavour.", "bubble gum"],
55: [images.yoyo, None, "A toy made of fine, strong string and plastic. \
Used for antigrav experiments.", PLAYER_NAME + "'s yoyo"],
56: [images.thread, None,
"A piece of fine, strong string", "a piece of string"],
57: [images.needle, None,
"A sharp needle from a cactus plant", "a cactus needle"],
58: [images.threaded_needle, None,
"A cactus needle, spearing a length of string", "needle and string"],
59: [images.canister, None,
"The air canister has a leak.", "a leaky air canister"],
60: [images.canister, None,
"It looks like the seal will hold!", "a sealed air canister"],
61: [images.mirror, None,
"The mirror throws a circle of light on the walls.", "a mirror"],
62: [images.bin_empty, None,
"A rarely used bin, made of light plastic", "a bin"],
63: [images.bin_full, None,
"A heavy bin full of water", "a bin full of water"],
64: [images.rags, None,
"An oily rag. Pick it up by one corner if you must!", "an oily rag"],
65: [images.hammer, None,
"A hammer. Maybe good for cracking things open...", "a hammer"],
66: [images.spoon, None, "A large serving spoon", "a spoon"],
67: [images.food_pouch, None,
"A dehydrated food pouch. It needs water.", "a dry food pack"],
68: [images.food, None,
"A food pouch. Use it to get 100% energy.", "ready-to-eat food"],
69: [images.book, None, "The book has the words 'Don't Panic' on the \
cover in large, friendly letters", "a book"],
70: [images.mp3_player, None,
"An MP3 player, with all the latest tunes", "an MP3 player"],
71: [images.lander, None, "The Poodle, a small space exploration craft. \
Its black box has a radio sealed inside.", "the Poodle lander"],
72: [images.radio, None, "A radio communications system, from the \
Poodle", "a communications radio"],
73: [images.gps_module, None, "A GPS Module", "a GPS module"],
74: [images.positioning_system, None, "Part of a positioning system. \
Needs a GPS module.", "a positioning interface"],
75: [images.positioning_system, None,
"A working positioning system", "a positioning computer"],
76: [images.scissors, None, "Scissors. They're too blunt to cut \
anything. Can you sharpen them?", "blunt scissors"],
77: [images.scissors, None,
"Razor-sharp scissors. Careful!", "sharpened scissors"],
78: [images.credit, None,
"A small coin for the station's vending systems",
"a station credit"],
79: [images.access_card, None,
"This access card belongs to " + PLAYER_NAME, "an access card"],
80: [images.access_card, None,
"This access card belongs to " + FRIEND1_NAME, "an access card"],
81: [images.access_card, None,
"This access card belongs to " + FRIEND2_NAME, "an access card"]
}
items_player_may_carry = list(range(53, 82))
# Numbers below are for floor, pressure pad, soil, toxic floor.
items_player_may_stand_on = items_player_may_carry + [0, 39, 2, 48]
###############
## SCENERY ##
###############
# Scenery describes objects that cannot move between rooms.
# room number: [[object number, y position, x position]...]
scenery = {
26: [[39,8,2]],
27: [[33,5,5], [33,1,1], [33,1,8], [47,5,2],
[47,3,10], [47,9,8], [42,1,6]],
28: [[27,0,3], [41,4,3], [41,4,7]],
29: [[7,2,6], [6,2,8], [12,1,13], [44,0,1],
[36,4,10], [10,1,1], [19,4,2], [17,4,4]],
30: [[34,1,1], [35,1,3]],
31: [[11,1,1], [19,1,8], [46,1,3]],
32: [[48,2,2], [48,2,3], [48,2,4], [48,3,2], [48,3,3],
[48,3,4], [48,4,2], [48,4,3], [48,4,4]],
33: [[13,1,1], [13,1,3], [13,1,8], [13,1,10], [48,2,1],
[48,2,7], [48,3,6], [48,3,3]],
34: [[37,2,2], [32,6,7], [37,10,4], [28,5,3]],
35: [[16,2,9], [16,2,2], [16,3,3], [16,3,8], [16,8,9], [16,8,2], [16,1,8],
[16,1,3], [12,8,6], [12,9,4], [12,9,8],
[15,4,6], [12,7,1], [12,7,11]],
36: [[4,3,1], [9,1,7], [8,1,8], [8,1,9],
[5,5,4], [6,5,7], [10,1,1], [12,1,2]],
37: [[48,3,1], [48,3,2], [48,7,1], [48,5,2], [48,5,3],
[48,7,2], [48,9,2], [48,9,3], [48,11,1], [48,11,2]],
38: [[43,0,2], [6,2,2], [6,3,5], [6,4,7], [6,2,9], [45,1,10]],
39: [[38,1,1], [7,3,4], [7,6,4], [5,3,6], [5,6,6],
[6,3,9], [6,6,9], [45,1,11], [12,1,8], [12,1,4]],
40: [[41,5,3], [41,5,7], [41,9,3], [41,9,7],
[13,1,1], [13,1,3], [42,1,12]],
41: [[4,3,1], [10,3,5], [4,5,1], [10,5,5], [4,7,1],
[10,7,5], [12,1,1], [12,1,5]],
44: [[46,4,3], [46,4,5], [18,1,1], [19,1,3],
[19,1,5], [52,4,7], [14,1,8]],
45: [[48,2,1], [48,2,2], [48,3,3], [48,3,4], [48,1,4], [48,1,1]],
46: [[10,1,1], [4,1,2], [8,1,7], [9,1,8], [8,1,9], [5,4,3], [7,3,2]],
47: [[9,1,1], [9,1,2], [10,1,3], [12,1,7], [5,4,4], [6,4,7], [4,1,8]],
48: [[17,4,1], [17,4,2], [17,4,3], [17,4,4], [17,4,5], [17,4,6], [17,4,7],
[17,8,1], [17,8,2], [17,8,3], [17,8,4],
[17,8,5], [17,8,6], [17,8,7], [14,1,1]],
49: [[14,2,2], [14,2,4], [7,5,1], [5,5,3], [48,3,3], [48,3,4]],
50: [[45,4,8], [11,1,1], [13,1,8], [33,2,1], [46,4,6]]
}
checksum = 0
check_counter = 0
for key, room_scenery_list in scenery.items():
for scenery_item_list in room_scenery_list:
checksum += (scenery_item_list[0] * key
+ scenery_item_list[1] * (key + 1)
+ scenery_item_list[2] * (key + 2))
check_counter += 1
print(check_counter, "scenery items")
assert check_counter == 161, "Expected 161 scenery items"
assert checksum == 200095, "Error in scenery data"
print("Scenery checksum: " + str(checksum))
for room in range(1, 26):# Add random scenery in planet locations.
if room != 13: # Skip room 13.
scenery_item = random.choice([16, 28, 29, 30])
scenery[room] = [[scenery_item, random.randint(2, 10),
random.randint(2, 10)]]
# Use loops to add fences to the planet surface rooms.
for room_coordinate in range(0, 13):
for room_number in [1, 2, 3, 4, 5]: # Add top fence
scenery[room_number] += [[31, 0, room_coordinate]]
for room_number in [1, 6, 11, 16, 21]: # Add left fence
scenery[room_number] += [[31, room_coordinate, 0]]
for room_number in [5, 10, 15, 20, 25]: # Add right fence
scenery[room_number] += [[31, room_coordinate, 12]]
del scenery[21][-1] # Delete last fence panel in Room 21
del scenery[25][-1] # Delete last fence panel in Room 25
###############
## MAKE MAP ##
###############
def get_floor_type():
if current_room in outdoor_rooms:
return 2 # soil
else:
return 0 # tiled floor
def generate_map():
# This function makes the map for the current room,
# using room data, scenery data and prop data.
global room_map, room_width, room_height, room_name, hazard_map
global top_left_x, top_left_y, wall_transparency_frame
room_data = GAME_MAP[current_room]
room_name = room_data[0]
room_height = room_data[1]
room_width = room_data[2]
floor_type = get_floor_type()
if current_room in range(1, 21):
bottom_edge = 2 #soil
side_edge = 2 #soil
if current_room in range(21, 26):
bottom_edge = 1 #wall
side_edge = 2 #soil
if current_room > 25:
bottom_edge = 1 #wall
side_edge = 1 #wall
# Create top line of room map.
room_map=[[side_edge] * room_width]
# Add middle lines of room map (wall, floor to fill width, wall).
for y in range(room_height - 2):
room_map.append([side_edge]
+ [floor_type]*(room_width - 2) + [side_edge])
# Add bottom line of room map.
room_map.append([bottom_edge] * room_width)
# Add doorways.
middle_row = int(room_height / 2)
middle_column = int(room_width / 2)
if room_data[4]: # If exit at right of this room
room_map[middle_row][room_width - 1] = floor_type
room_map[middle_row+1][room_width - 1] = floor_type
room_map[middle_row-1][room_width - 1] = floor_type
if current_room % MAP_WIDTH != 1: # If room is not on left of map
room_to_left = GAME_MAP[current_room - 1]
# If room on the left has a right exit, add left exit in this room
if room_to_left[4]:
room_map[middle_row][0] = floor_type
room_map[middle_row + 1][0] = floor_type
room_map[middle_row - 1][0] = floor_type
if room_data[3]: # If exit at top of this room
room_map[0][middle_column] = floor_type
room_map[0][middle_column + 1] = floor_type
room_map[0][middle_column - 1] = floor_type
if current_room <= MAP_SIZE - MAP_WIDTH: # If room is not on bottom row
room_below = GAME_MAP[current_room+MAP_WIDTH]
# If room below has a top exit, add exit at bottom of this one
if room_below[3]:
room_map[room_height-1][middle_column] = floor_type
room_map[room_height-1][middle_column + 1] = floor_type
room_map[room_height-1][middle_column - 1] = floor_type
if current_room in scenery:
for this_scenery in scenery[current_room]:
scenery_number = this_scenery[0]
scenery_y = this_scenery[1]
scenery_x = this_scenery[2]
room_map[scenery_y][scenery_x] = scenery_number
image_here = objects[scenery_number][0]
image_width = image_here.get_width()
image_width_in_tiles = int(image_width / TILE_SIZE)
for tile_number in range(1, image_width_in_tiles):
room_map[scenery_y][scenery_x + tile_number] = 255
center_y = int(HEIGHT / 2) # Center of game window
center_x = int(WIDTH / 2)
room_pixel_width = room_width * TILE_SIZE # Size of room in pixels
room_pixel_height = room_height * TILE_SIZE
top_left_x = center_x - 0.5 * room_pixel_width
top_left_y = (center_y - 0.5 * room_pixel_height) + 110
for prop_number, prop_info in props.items():
prop_room = prop_info[0]
prop_y = prop_info[1]
prop_x = prop_info[2]
if (prop_room == current_room and
room_map[prop_y][prop_x] in [0, 39, 2]):
room_map[prop_y][prop_x] = prop_number
image_here = objects[prop_number][0]
image_width = image_here.get_width()
image_width_in_tiles = int(image_width / TILE_SIZE)
for tile_number in range(1, image_width_in_tiles):
room_map[prop_y][prop_x + tile_number] = 255
###############
## GAME LOOP ##
###############
def start_room():
show_text("You are here: " + room_name, 0)
def game_loop():
global player_x, player_y, current_room
global from_player_x, from_player_y
global player_image, player_image_shadow
global selected_item, item_carrying, energy
global player_offset_x, player_offset_y
global player_frame, player_direction
if game_over:
return
if player_frame > 0:
player_frame += 1
time.sleep(0.05)
if player_frame == 5:
player_frame = 0
player_offset_x = 0
player_offset_y = 0
# save player's current position
old_player_x = player_x
old_player_y = player_y
# move if key is pressed
if player_frame == 0:
if keyboard.right:
from_player_x = player_x
from_player_y = player_y
player_x += 1
player_direction = "right"
player_frame = 1
elif keyboard.left: #elif stops player making diagonal movements
from_player_x = player_x
from_player_y = player_y
player_x -= 1
player_direction = "left"
player_frame = 1
elif keyboard.up:
from_player_x = player_x
from_player_y = player_y
player_y -= 1
player_direction = "up"
player_frame = 1
elif keyboard.down:
from_player_x = player_x
from_player_y = player_y
player_y += 1
player_direction = "down"
player_frame = 1
# check for exiting the room
if player_x == room_width: # through door on RIGHT
#clock.unschedule(hazard_move)
current_room += 1
generate_map()
player_x = 0 # enter at left
player_y = int(room_height / 2) # enter at door
player_frame = 0
start_room()
return
if player_x == -1: # through door on LEFT
#clock.unschedule(hazard_move)
current_room -= 1
generate_map()
player_x = room_width - 1 # enter at right
player_y = int(room_height / 2) # enter at door
player_frame = 0
start_room()
return
if player_y == room_height: # through door at BOTTOM
#clock.unschedule(hazard_move)
current_room += MAP_WIDTH
generate_map()
player_y = 0 # enter at top
player_x = int(room_width / 2) # enter at door
player_frame = 0
start_room()
return
if player_y == -1: # through door at TOP
#clock.unschedule(hazard_move)
current_room -= MAP_WIDTH
generate_map()
player_y = room_height - 1 # enter at bottom
player_x = int(room_width / 2) # enter at door
player_frame = 0
start_room()
return
if keyboard.g:
pick_up_object()
if keyboard.tab and len(in_my_pockets) > 0:
selected_item += 1
if selected_item > len(in_my_pockets) - 1:
selected_item = 0
item_carrying = in_my_pockets[selected_item]
display_inventory()
if keyboard.d and item_carrying:
drop_object(old_player_y, old_player_x)
if keyboard.space:
examine_object()
if keyboard.u:
use_object()
# If the player is standing somewhere they shouldn't, move them back.
if room_map[player_y][player_x] not in items_player_may_stand_on: #\
# or hazard_map[player_y][player_x] != 0:
player_x = old_player_x
player_y = old_player_y
player_frame = 0
if player_direction == "right" and player_frame > 0:
player_offset_x = -1 + (0.25 * player_frame)
if player_direction == "left" and player_frame > 0:
player_offset_x = 1 - (0.25 * player_frame)
if player_direction == "up" and player_frame > 0:
player_offset_y = 1 - (0.25 * player_frame)
if player_direction == "down" and player_frame > 0:
player_offset_y = -1 + (0.25 * player_frame)
###############
## DISPLAY ##
###############
def draw_image(image, y, x):
screen.blit(
image,
(top_left_x + (x * TILE_SIZE),
top_left_y + (y * TILE_SIZE) - image.get_height())
)
def draw_shadow(image, y, x):
screen.blit(
image,
(top_left_x + (x * TILE_SIZE),
top_left_y + (y * TILE_SIZE))
)
def draw_player():
player_image = PLAYER[player_direction][player_frame]
draw_image(player_image, player_y + player_offset_y,
player_x + player_offset_x)
player_image_shadow = PLAYER_SHADOW[player_direction][player_frame]
draw_shadow(player_image_shadow, player_y + player_offset_y,
player_x + player_offset_x)
def draw():
if game_over:
return
# Clear the game arena area.
box = Rect((0, 150), (800, 600))
screen.draw.filled_rect(box, RED)
box = Rect ((0, 0), (800, top_left_y + (room_height - 1)*30))
screen.surface.set_clip(box)
floor_type = get_floor_type()
for y in range(room_height): # Lay down floor tiles, then items on floor.
for x in range(room_width):
draw_image(objects[floor_type][0], y, x)
# Next line enables shadows to fall on top of objects on floor
if room_map[y][x] in items_player_may_stand_on:
draw_image(objects[room_map[y][x]][0], y, x)
# Pressure pad in room 26 is added here, so props can go on top of it.
if current_room == 26:
draw_image(objects[39][0], 8, 2)
image_on_pad = room_map[8][2]
if image_on_pad > 0:
draw_image(objects[image_on_pad][0], 8, 2)
for y in range(room_height):
for x in range(room_width):
item_here = room_map[y][x]
# Player cannot walk on 255: it marks spaces used by wide objects.
if item_here not in items_player_may_stand_on + [255]:
image = objects[item_here][0]
if (current_room in outdoor_rooms
and y == room_height - 1
and room_map[y][x] == 1) or \
(current_room not in outdoor_rooms
and y == room_height - 1
and room_map[y][x] == 1
and x > 0
and x < room_width - 1):
# Add transparent wall image in the front row.
image = PILLARS[wall_transparency_frame]
draw_image(image, y, x)
if objects[item_here][1] is not None: # If object has a shadow
shadow_image = objects[item_here][1]
# if shadow might need horizontal tiling
if shadow_image in [images.half_shadow,
images.full_shadow]:
shadow_width = int(image.get_width() / TILE_SIZE)
# Use shadow across width of object.
for z in range(0, shadow_width):
draw_shadow(shadow_image, y, x+z)
else:
draw_shadow(shadow_image, y, x)
if (player_y == y):
draw_player()
screen.surface.set_clip(None)
def adjust_wall_transparency():
global wall_transparency_frame
if (player_y == room_height - 2
and room_map[room_height - 1][player_x] == 1
and wall_transparency_frame < 4):
wall_transparency_frame += 1 # Fade wall out.
if ((player_y < room_height - 2
or room_map[room_height - 1][player_x] != 1)
and wall_transparency_frame > 0):
wall_transparency_frame -= 1 # Fade wall in.
def show_text(text_to_show, line_number):
if game_over:
return
text_lines = [15, 50]
box = Rect((0, text_lines[line_number]), (800, 35))
screen.draw.filled_rect(box, BLACK)
screen.draw.text(text_to_show,
(20, text_lines[line_number]), color=GREEN)
###############
## PROPS ##
###############
# Props are objects that may move between rooms, appear or disappear.
# All props must be set up here. Props not yet in the game go into room 0.
# object number : [room, y, x]
props = {
20: [31, 0, 4], 21: [26, 0, 1], 22: [41, 0, 2], 23: [39, 0, 5],
24: [45, 0, 2],
25: [32, 0, 2], 26: [27, 12, 5], # two sides of same door
40: [0, 8, 6], 53: [45, 1, 5], 54: [0, 0, 0], 55: [0, 0, 0],
56: [0, 0, 0], 57: [35, 4, 6], 58: [0, 0, 0], 59: [31, 1, 7],
60: [0, 0, 0], 61: [36, 1, 1], 62: [36, 1, 6], 63: [0, 0, 0],
64: [27, 8, 3], 65: [50, 1, 7], 66: [39, 5, 6], 67: [46, 1, 1],
68: [0, 0, 0], 69: [30, 3, 3], 70: [47, 1, 3],
71: [0, LANDER_Y, LANDER_X], 72: [0, 0, 0], 73: [27, 4, 6],
74: [28, 1, 11], 75: [0, 0, 0], 76: [41, 3, 5], 77: [0, 0, 0],
78: [35, 9, 11], 79: [26, 3, 2], 80: [41, 7, 5], 81: [29, 1, 1]
}
checksum = 0
for key, prop in props.items():
if key != 71: # 71 is skipped because it's different each game.
checksum += (prop[0] * key
+ prop[1] * (key + 1)
+ prop[2] * (key + 2))
print(len(props), "props")
assert len(props) == 37, "Expected 37 prop items"
print("Prop checksum:", checksum)
assert checksum == 61414, "Error in props data"
in_my_pockets = [55]
selected_item = 0 # the first item
item_carrying = in_my_pockets[selected_item]
RECIPES = [
[62, 35, 63], [76, 28, 77], [78, 38, 54], [73, 74, 75],
[59, 54, 60], [77, 55, 56], [56, 57, 58], [71, 65, 72],
[88, 58, 89], [89, 60, 90], [67, 35, 68]
]
checksum = 0
check_counter = 1
for recipe in RECIPES:
checksum += (recipe[0] * check_counter
+ recipe[1] * (check_counter + 1)
+ recipe[2] * (check_counter + 2))
check_counter += 3
print(len(RECIPES), "recipes")
assert len(RECIPES) == 11, "Expected 11 recipes"
assert checksum == 37296, "Error in recipes data"
print("Recipe checksum:", checksum)
#######################
## PROP INTERACTIONS ##
#######################
def find_object_start_x():
checker_x = player_x
while room_map[player_y][checker_x] == 255:
checker_x -= 1
return checker_x
def get_item_under_player():
item_x = find_object_start_x()
item_player_is_on = room_map[player_y][item_x]
return item_player_is_on
def pick_up_object():
global room_map
# Get object number at player's location.
item_player_is_on = get_item_under_player()
if item_player_is_on in items_player_may_carry:
# Clear the floor space.
room_map[player_y][player_x] = get_floor_type()
add_object(item_player_is_on)
show_text("Now carrying " + objects[item_player_is_on][3], 0)
sounds.pickup.play()
time.sleep(0.5)
else:
show_text("You can't carry that!", 0)
def add_object(item): # Adds item to inventory.
global selected_item, item_carrying
in_my_pockets.append(item)
item_carrying = item
# Minus one because indexes start at 0.
selected_item = len(in_my_pockets) - 1
display_inventory()
props[item][0] = 0 # Carried objects go into room 0 (off the map).
def display_inventory():
box = Rect((0, 45), (800, 105))
screen.draw.filled_rect(box, BLACK)
if len(in_my_pockets) == 0:
return
start_display = (selected_item // 16) * 16
list_to_show = in_my_pockets[start_display : start_display + 16]
selected_marker = selected_item % 16
for item_counter in range(len(list_to_show)):
item_number = list_to_show[item_counter]
image = objects[item_number][0]
screen.blit(image, (25 + (46 * item_counter), 90))
box_left = (selected_marker * 46) - 3
box = Rect((22 + box_left, 85), (40, 40))
screen.draw.rect(box, WHITE)
item_highlighted = in_my_pockets[selected_item]
description = objects[item_highlighted][2]
screen.draw.text(description, (20, 130), color="white")
def drop_object(old_y, old_x):
global room_map, props
if room_map[old_y][old_x] in [0, 2, 39]: # places you can drop things
props[item_carrying][0] = current_room
props[item_carrying][1] = old_y
props[item_carrying][2] = old_x
room_map[old_y][old_x] = item_carrying
show_text("You have dropped " + objects[item_carrying][3], 0)
sounds.drop.play()
remove_object(item_carrying)
time.sleep(0.5)
else: # This only happens if there is already a prop here
show_text("You can't drop that there.", 0)
time.sleep(0.5)
def remove_object(item): # Takes item out of inventory
global selected_item, in_my_pockets, item_carrying
in_my_pockets.remove(item)
selected_item = selected_item - 1
if selected_item < 0:
selected_item = 0
if len(in_my_pockets) == 0: # If they're not carrying anything
item_carrying = False # Set item_carrying to False
else: # Otherwise set it to the new selected item
item_carrying = in_my_pockets[selected_item]
display_inventory()
def examine_object():
item_player_is_on = get_item_under_player()
left_tile_of_item = find_object_start_x()
if item_player_is_on in [0, 2]: # don't describe the floor
return
description = "You see: " + objects[item_player_is_on][2]
for prop_number, details in props.items():
# props = object number: [room number, y, x]
if details[0] == current_room: # if prop is in the room
# If prop is hidden (= at player's location but not on map)
if (details[1] == player_y
and details[2] == left_tile_of_item
and room_map[details[1]][details[2]] != prop_number):
add_object(prop_number)
description = "You found " + objects[prop_number][3]
sounds.combine.play()
show_text(description, 0)
time.sleep(0.5)
#################
## USE OBJECTS ##
#################
def use_object():
global room_map, props, item_carrying, air, selected_item, energy
global in_my_pockets, suit_stitched, air_fixed, game_over
use_message = "You fiddle around with it but don't get anywhere."
standard_responses = {
4: "Air is running out! You can't take this lying down!",
6: "This is no time to sit around!",
7: "This is no time to sit around!",
32: "It shakes and rumbles, but nothing else happens.",
34: "Ah! That's better. Now wash your hands.",
35: "You wash your hands and shake the water off.",
37: "The test tubes smoke slightly as you shake them.",
54: "You chew the gum. It's sticky like glue.",
55: "The yoyo bounces up and down, slightly slower than on Earth",
56: "It's a bit too fiddly. Can you thread it on something?",
59: "You need to fix the leak before you can use the canister",
61: "You try signalling with the mirror, but nobody can see you.",
62: "Don't throw resources away. Things might come in handy...",
67: "To enjoy yummy space food, just add water!",
75: "You are at Sector: " + str(current_room) + " // X: " \
+ str(player_x) + " // Y: " + str(player_y)
}
# Get object number at player's location.
item_player_is_on = get_item_under_player()
for this_item in [item_player_is_on, item_carrying]:
if this_item in standard_responses:
use_message = standard_responses[this_item]
if item_carrying == 70 or item_player_is_on == 70:
use_message = "Banging tunes!"
sounds.steelmusic.play(2)
elif item_player_is_on == 11:
use_message = "AIR: " + str(air) + \
"% / ENERGY " + str(energy) + "% / "
if not suit_stitched:
use_message += "*ALERT* SUIT FABRIC TORN / "
if not air_fixed:
use_message += "*ALERT* SUIT AIR BOTTLE MISSING"
if suit_stitched and air_fixed:
use_message += " SUIT OK"
show_text(use_message, 0)
sounds.say_status_report.play()
time.sleep(0.5)
# If "on" the computer, player intention is clearly status update.
# Return to stop another object use accidentally overriding this.
return
elif item_carrying == 60 or item_player_is_on == 60:
use_message = "You fix " + objects[60][3] + " to the suit"
air_fixed = True
air = 90
air_countdown()
remove_object(60)
elif (item_carrying == 58 or item_player_is_on == 58) \
and not suit_stitched:
use_message = "You use " + objects[56][3] + \
" to repair the suit fabric"
suit_stitched = True
remove_object(58)
elif item_carrying == 72 or item_player_is_on == 72:
use_message = "You radio for help. A rescue ship is coming. \
Rendezvous Sector 13, outside."
props[40][0] = 13
elif (item_carrying == 66 or item_player_is_on == 66) \
and current_room in outdoor_rooms:
use_message = "You dig..."
if (current_room == LANDER_SECTOR
and player_x == LANDER_X
and player_y == LANDER_Y):
add_object(71)
use_message = "You found the Poodle lander!"
elif item_player_is_on == 40:
clock.unschedule(air_countdown)
show_text("Congratulations, "+ PLAYER_NAME +"!", 0)
show_text("Mission success! You have made it to safety.", 1)
game_over = True
sounds.take_off.play()
game_completion_sequence()
elif item_player_is_on == 16:
energy += 1
if energy > 100:
energy = 100
use_message = "You munch the lettuce and get a little energy back"
draw_energy_air()
elif item_player_is_on == 42:
if current_room == 27:
open_door(26)
props[25][0] = 0 # Door from RM32 to engineering bay
props[26][0] = 0 # Door inside engineering bay
clock.schedule_unique(shut_engineering_door, 60)
use_message = "You press the button"
show_text("Door to engineering bay is open for 60 seconds", 1)
sounds.say_doors_open.play()
sounds.doors.play()
elif item_carrying == 68 or item_player_is_on == 68:
energy = 100
use_message = "You use the food to restore your energy"
remove_object(68)
draw_energy_air()
if suit_stitched and air_fixed: # open airlock access
if current_room == 31 and props[20][0] == 31:
open_door(20) # which includes removing the door
sounds.say_airlock_open.play()
show_text("The computer tells you the airlock is now open.", 1)
elif props[20][0] == 31:
props[20][0] = 0 # remove door from map
sounds.say_airlock_open.play()
show_text("The computer tells you the airlock is now open.", 1)
for recipe in RECIPES:
ingredient1 = recipe[0]
ingredient2 = recipe[1]
combination = recipe[2]
if (item_carrying == ingredient1
and item_player_is_on == ingredient2) \
or (item_carrying == ingredient2
and item_player_is_on == ingredient1):
use_message = "You combine " + objects[ingredient1][3] \
+ " and " + objects[ingredient2][3] \
+ " to make " + objects[combination][3]
if item_player_is_on in props.keys():
props[item_player_is_on][0] = 0
room_map[player_y][player_x] = get_floor_type()
in_my_pockets.remove(item_carrying)
add_object(combination)
sounds.combine.play()
# {key object number: door object number}
ACCESS_DICTIONARY = { 79:22, 80:23, 81:24 }
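    # e.g. carrying key object 79 unlocks door object 22, provided that door
    # is in the current room.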
if item_carrying in ACCESS_DICTIONARY:
door_number = ACCESS_DICTIONARY[item_carrying]
if props[door_number][0] == current_room:
use_message = "You unlock the door!"
sounds.say_doors_open.play()
sounds.doors.play()
open_door(door_number)
show_text(use_message, 0)
time.sleep(0.5)
def game_completion_sequence():
global launch_frame #(initial value is 0, set up in VARIABLES section)
box = Rect((0, 150), (800, 600))
screen.draw.filled_rect(box, (128, 0, 0))
    box = Rect((0, top_left_y - 30), (800, 390))
screen.surface.set_clip(box)
for y in range(0, 13):
for x in range(0, 13):
draw_image(images.soil, y, x)
launch_frame += 1
if launch_frame < 9:
draw_image(images.rescue_ship, 8 - launch_frame, 6)
draw_shadow(images.rescue_ship_shadow, 8 + launch_frame, 6)
clock.schedule(game_completion_sequence, 0.25)
else:
screen.surface.set_clip(None)
screen.draw.text("MISSION", (200, 380), color = "white",
fontsize = 128, shadow = (1, 1), scolor = "black")
screen.draw.text("COMPLETE", (145, 480), color = "white",
fontsize = 128, shadow = (1, 1), scolor = "black")
sounds.completion.play()
sounds.say_mission_complete.play()
###############
## DOORS ##
###############
def open_door(opening_door_number):
global door_frames, door_shadow_frames
global door_frame_number, door_object_number
door_frames = [images.door1, images.door2, images.door3,
images.door4, images.floor]
# (Final frame restores shadow ready for when door reappears).
door_shadow_frames = [images.door1_shadow, images.door2_shadow,
images.door3_shadow, images.door4_shadow,
images.door_shadow]
door_frame_number = 0
door_object_number = opening_door_number
do_door_animation()
def close_door(closing_door_number):
global door_frames, door_shadow_frames
global door_frame_number, door_object_number, player_y
door_frames = [images.door4, images.door3, images.door2,
images.door1, images.door]
door_shadow_frames = [images.door4_shadow, images.door3_shadow,
images.door2_shadow, images.door1_shadow,
images.door_shadow]
door_frame_number = 0
door_object_number = closing_door_number
# If player is in same row as a door, they must be in open doorway
if player_y == props[door_object_number][1]:
if player_y == 0: # if in the top doorway
player_y = 1 # move them down
else:
player_y = room_height - 2 # move them up
do_door_animation()
###############
## START ##
###############
clock.schedule_interval(game_loop, 0.03)
generate_map()
clock.schedule_interval(adjust_wall_transparency, 0.05)
clock.schedule_unique(display_inventory, 1)
| 39.36908
| 79
| 0.60301
|
06546f7c6d7996749ef1fee9de4291fc3480bfd1
| 3,962
|
py
|
Python
|
alipay/aop/api/request/AlipayInsDataAutoScoreQueryRequest.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/request/AlipayInsDataAutoScoreQueryRequest.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/request/AlipayInsDataAutoScoreQueryRequest.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayInsDataAutoScoreQueryModel import AlipayInsDataAutoScoreQueryModel
class AlipayInsDataAutoScoreQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayInsDataAutoScoreQueryModel):
self._biz_content = value
else:
self._biz_content = AlipayInsDataAutoScoreQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.ins.data.auto.score.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| 27.324138
| 148
| 0.643614
|
c5650e3bf56ed1b2670b4e803792f0c3c4bb4db2
| 7,289
|
py
|
Python
|
4_flask_restful/practice/JihyunLee/answer.py
|
geronimo03/WebStudio2019
|
c7a9b59098c2ce97bddfd7727963c3b0f48bdbba
|
[
"MIT"
] | 14
|
2019-03-06T10:32:40.000Z
|
2021-11-18T01:44:28.000Z
|
4_flask_restful/practice/JihyunLee/answer.py
|
geronimo03/WebStudio2019
|
c7a9b59098c2ce97bddfd7727963c3b0f48bdbba
|
[
"MIT"
] | 35
|
2019-03-13T07:04:02.000Z
|
2019-10-08T06:26:45.000Z
|
4_flask_restful/practice/JihyunLee/answer.py
|
geronimo03/WebStudio2019
|
c7a9b59098c2ce97bddfd7727963c3b0f48bdbba
|
[
"MIT"
] | 22
|
2019-03-11T11:00:24.000Z
|
2019-09-14T06:53:30.000Z
|
from flask import Flask, request
from flask_restful import Api, Resource
import json
import os
app = Flask(__name__)
api = Api(app)
class UserList(Resource):
filename = 'users.json'
def get_users(self):
users = []
if os.path.exists(self.filename):
with open(self.filename, 'r') as fp:
users = json.loads(fp.read())
return users
def get(self):
if not os.path.exists(self.filename):
            return 'users.json does not exist'
r = self.get_users()
s = ''
for d in r:
email = d['email']
password = d['password']
s += '[email: {}, pw: {}]'.format(email, password)
return s
def post(self):
r_json = request.get_json()
email = r_json['email']
password = r_json['password']
r = self.get_users()
for d in r:
if email == d['email']:
                return '{} already exists'.format(email)
_id = 0
for d in r:
_id = max(_id, d['id'])
_id = _id + 1
r_json['id'] = _id
r.append(r_json)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(r))
return 'email: {}, pw: {}'.format(email, password)
def put(self):
r_json = request.get_json()
_id = r_json['id']
password = r_json['password']
users = self.get_users()
found = False
for idx, _ in enumerate(users):
if users[idx]['id'] == _id:
found = True
users[idx]['password'] = password
if not found:
            return '{} does not exist'.format(_id)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(users))
return 'update password successfully'
def delete(self):
r_json = request.get_json()
_id = r_json['id']
users = self.get_users()
found = False
for idx, _ in enumerate(users):
if users[idx]['id'] == _id:
found = True
del users[idx]
if not found:
            return '{} does not exist'.format(_id)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(users))
return '{} deleted successfully'.format(_id)
class ArticleList(Resource):
filename = 'articles.json'
def get_articles(self):
articles = []
if os.path.exists(self.filename):
with open(self.filename, 'r') as fp:
articles = json.loads(fp.read())
return articles
def get(self):
if not os.path.exists(self.filename):
return 'articles.json does not exist'
r = self.get_articles()
s = ''
for d in r:
id = d['id']
user_id = d['user_id']
title = d['title']
content = d['content']
s += '[id: {}, user_id: {}, title: {}, content: {}]'.format(id, user_id, title, content)
return s
def post(self):
r_json = request.get_json()
user_id = r_json['user_id']
title = r_json['title']
content = r_json['content']
r = self.get_articles()
for d in r:
if title == d['title']:
return '{} is already taken'.format(title)
_id = 0
for d in r:
_id = max(_id, d['id'])
_id = _id + 1
r_json['id'] = _id
r.append(r_json)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(r))
        return 'id: {}, user_id: {}, title: {}, content: {}'.format(_id, user_id, title, content)
    def put(self):
r_json = request.get_json()
_id = r_json['id']
title = r_json['title']
content = r_json['content']
articles = self.get_articles()
found = False
for idx, _ in enumerate(articles):
if articles[idx]['id'] == _id:
found = True
articles[idx]['title'] = title
articles[idx]['content']= content
with open(self.filename,'w') as fp:
fp.write(json.dumps(articles))
return 'Edited successfully'
return '{} does not exist'.format(_id)
    def delete(self):
r_json = request.get_json()
_id = r_json['id']
articles = self.get_articles()
found = False
for idx, _ in enumerate(articles):
if articles[idx]['id'] == _id:
found = True
del articles[idx]
with open(self.filename, 'w') as fp:
fp.write(json.dumps(articles))
return 'Deleted successfully'
return '{} does not exist'.format(_id)
class CommentList(Resource):
filename = 'comments.json'
def get_comments(self):
comments = []
if os.path.exists(self.filename):
with open(self.filename, 'r') as fp:
comments = json.loads(fp.read())
return comments
def get(self):
if not os.path.exists(self.filename):
return 'comments.json does not exist'
r = self.get_comments()
s = ''
for d in r:
id = d['id']
user_id = d['user_id']
article_id = d['article_id']
content = d['content']
s += '[id: {}, user_id: {}, article_id: {}, content: {}]'.format(id, user_id, article_id, content)
return s
def post(self):
r_json = request.get_json()
user_id = r_json['user_id']
article_id = r_json['article_id']
content = r_json['content']
r = self.get_comments()
_id = 0
for d in r:
_id = max(_id, d['id'])
_id = _id +1
r_json['id'] = _id
        r.append(r_json)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(r))
        return 'user_id: {}, article_id: {}, content: {}'.format(user_id, article_id, content)
def put(self):
r_json = request.get_json()
_id = r_json['id']
content = r_json['content']
comments = self.get_comments()
found = False
for idx, _ in enumerate(comments):
if comments[idx]['id'] == _id:
found = True
                comments[idx]['content'] = content
with open(self.filename, 'w') as fp:
fp.write(json.dumps(comments))
return 'Comments Edited successfully'
return '{} does not exist'.format(_id)
def delete(self):
r_json = request.get_json()
_id = r_json['id']
comments = self.get_comments()
found = False
for idx, _ in enumerate(comments):
if comments[idx]['id'] == _id:
found = True
del comments[idx]
with open(self.filename, 'w') as fp:
fp.write(json.dumps(comments))
return 'Deleted successfully'
return '{} does not exist'.format(_id)
api.add_resource(UserList, '/api/users')
api.add_resource(ArticleList, '/api/articles')
api.add_resource(CommentList, '/api/comments')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
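# Usage sketch (assumed host/port, hypothetical payload): with the server
# running, the UserList resource can be exercised with e.g.
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"email": "a@b.com", "password": "pw"}' http://localhost:5000/api/users
#   curl http://localhost:5000/api/users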
| 30.885593
| 110
| 0.511593
|
f1cf3054931c7a5b84468d280367ca11e237c244
| 844
|
py
|
Python
|
src/utils/data_mgmt.py
|
Rohanbagulwar/DVC-NLP-Simple-usecase
|
9968439be55c64566250d01f19414401825884dc
|
[
"MIT"
] | null | null | null |
src/utils/data_mgmt.py
|
Rohanbagulwar/DVC-NLP-Simple-usecase
|
9968439be55c64566250d01f19414401825884dc
|
[
"MIT"
] | null | null | null |
src/utils/data_mgmt.py
|
Rohanbagulwar/DVC-NLP-Simple-usecase
|
9968439be55c64566250d01f19414401825884dc
|
[
"MIT"
] | null | null | null |
import logging
# from tqdm import tqdm
import random
import xml.etree.ElementTree as ET
import re
def process_posts(fd_in, fd_out_train, fd_out_test, target_tag, split):
line_num = 1
    for line in fd_in:
try:
fd_out = fd_out_train if random.random() > split else fd_out_test
attr = ET.fromstring(line).attrib
pid = attr.get("Id", "")
label = 1 if target_tag in attr.get("Tags", "") else 0
title = re.sub(r"\s+", " ", attr.get("Title", "")).strip()
body = re.sub(r"\s+", " ", attr.get("Body", "")).strip()
text = title + " " + body
fd_out.write(f"{pid}\t{label}\t{text}\n")
line_num += 1
except Exception as e:
msg = f"Skipping the broken line {line_num}: {e}\n"
logging.exception(msg)
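# Example invocation (assumed filenames and tag): split an XML Posts dump into
# tab-separated train/test files; `split` is roughly the test fraction, since
# lines with random.random() > split go to the train file.
#   with open("Posts.xml") as fd_in, \
#           open("train.tsv", "w") as fd_tr, \
#           open("test.tsv", "w") as fd_te:
#       process_posts(fd_in, fd_tr, fd_te, target_tag="<python>", split=0.2)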
| 33.76
| 77
| 0.554502
|
19311c998f39cd18458d7b364013a383e6b3d94c
| 5,137
|
py
|
Python
|
imagededup/handlers/search/retrieval.py
|
Incopro/imagededup
|
6ad3325512fce6b44fd62c628ff4a62ebdc5c7f2
|
[
"Apache-2.0"
] | 2
|
2019-12-06T08:48:32.000Z
|
2019-12-30T21:29:14.000Z
|
imagededup/handlers/search/retrieval.py
|
Incopro/imagededup
|
6ad3325512fce6b44fd62c628ff4a62ebdc5c7f2
|
[
"Apache-2.0"
] | 8
|
2020-09-25T22:25:25.000Z
|
2022-02-10T02:04:10.000Z
|
imagededup/handlers/search/retrieval.py
|
Incopro/imagededup
|
6ad3325512fce6b44fd62c628ff4a62ebdc5c7f2
|
[
"Apache-2.0"
] | null | null | null |
from typing import Callable, Dict, Union, Tuple
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from imagededup.handlers.search.bktree import BKTree
from imagededup.handlers.search.brute_force import BruteForce
from imagededup.utils.general_utils import parallelise
def cosine_similarity_chunk(t: Tuple) -> np.ndarray:
return cosine_similarity(t[0][t[1][0] : t[1][1]], t[0]).astype('float16')
def get_cosine_similarity(
X: np.ndarray, chunk_size: int = 1000, threshold: int = 10000
) -> np.ndarray:
n_rows = X.shape[0]
if n_rows <= threshold:
return cosine_similarity(X)
else:
        print('Large feature matrix, so calculating cosine similarities in chunks...')
start_idxs = list(range(0, n_rows, chunk_size))
end_idxs = start_idxs[1:] + [n_rows]
cos_sim = parallelise(
cosine_similarity_chunk,
[(X, idxs) for i, idxs in enumerate(zip(start_idxs, end_idxs))],
)
return np.vstack(cos_sim)
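# Example (hypothetical feature matrix): above `threshold` rows, the similarity
# matrix is built chunk by chunk in parallel, e.g.
#   feats = np.random.rand(25000, 512)
#   sim = get_cosine_similarity(feats, chunk_size=1000, threshold=10000)
#   sim.shape  # (25000, 25000)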
class HashEval:
def __init__(
self,
test: Dict,
queries: Dict,
distance_function: Callable,
threshold: int = 5,
search_method: str = 'bktree',
) -> None:
"""
Initialize a HashEval object which offers an interface to control hashing and search methods for desired
dataset. Compute a map of duplicate images in the document space given certain input control parameters.
"""
self.test = test # database
self.queries = queries
self.distance_invoker = distance_function
self.threshold = threshold
self.query_results_map = None
if search_method == 'bktree':
self._fetch_nearest_neighbors_bktree() # bktree is the default search method
else:
self._fetch_nearest_neighbors_brute_force()
    def _searcher(self, data_tuple) -> list:
"""
Perform search on a query passed in by _get_query_results multiprocessing part.
Args:
data_tuple: Tuple of (query_key, query_val, search_method_object, thresh)
Returns:
List of retrieved duplicate files and corresponding hamming distance for the query file.
"""
query_key, query_val, search_method_object, thresh = data_tuple
res = search_method_object.search(query=query_val, tol=thresh)
res = [i for i in res if i[0] != query_key] # to avoid self retrieval
return res
def _get_query_results(
self, search_method_object: Union[BruteForce, BKTree]
) -> None:
"""
Get result for the query using specified search object. Populate the global query_results_map.
Args:
search_method_object: BruteForce or BKTree object to get results for the query.
"""
args = list(
zip(
list(self.queries.keys()),
list(self.queries.values()),
[search_method_object] * len(self.queries),
[self.threshold] * len(self.queries),
)
)
result_map_list = parallelise(self._searcher, args)
result_map = dict(zip(list(self.queries.keys()), result_map_list))
self.query_results_map = {
k: [i for i in sorted(v, key=lambda tup: tup[1], reverse=False)]
for k, v in result_map.items()
} # {'filename.jpg': [('dup1.jpg', 3)], 'filename2.jpg': [('dup2.jpg', 10)]}
def _fetch_nearest_neighbors_brute_force(self) -> None:
"""
Wrapper function to retrieve results for all queries in dataset using brute-force search.
"""
print('Start: Retrieving duplicates using Brute force algorithm')
bruteforce = BruteForce(self.test, self.distance_invoker)
self._get_query_results(bruteforce)
print('End: Retrieving duplicates using Brute force algorithm')
def _fetch_nearest_neighbors_bktree(self) -> None:
"""
Wrapper function to retrieve results for all queries in dataset using a BKTree search.
"""
print('Start: Retrieving duplicates using BKTree algorithm')
built_tree = BKTree(self.test, self.distance_invoker) # construct bktree
self._get_query_results(built_tree)
print('End: Retrieving duplicates using BKTree algorithm')
def retrieve_results(self, scores: bool = False) -> Dict:
"""
Return results with or without scores.
Args:
            scores: Boolean indicating whether results are to be returned with or without scores.
Returns:
if scores is True, then a dictionary of the form {'image1.jpg': [('image1_duplicate1.jpg',
score), ('image1_duplicate2.jpg', score)], 'image2.jpg': [] ..}
if scores is False, then a dictionary of the form {'image1.jpg': ['image1_duplicate1.jpg',
'image1_duplicate2.jpg'], 'image2.jpg':['image1_duplicate1.jpg',..], ..}
"""
if scores:
return self.query_results_map
else:
return {k: [i[0] for i in v] for k, v in self.query_results_map.items()}
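# Usage sketch (all names below are hypothetical: dicts mapping filename to
# hash, a hamming-distance callable, and an integer threshold):
#   evaluator = HashEval(test=db_hashes, queries=query_hashes,
#                        distance_function=hamming_distance, threshold=10)
#   duplicates = evaluator.retrieve_results(scores=True)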
| 38.335821
| 112
| 0.638894
|
c20a745252df39c1c793868fea8d50485dea7c3f
| 151
|
py
|
Python
|
data types and variables exerscise/Triples of Latin Letters.py
|
nrgxtra/fundamentals
|
d9f35eb040c0e1009aad2f7305c035fda207a147
|
[
"MIT"
] | null | null | null |
data types and variables exerscise/Triples of Latin Letters.py
|
nrgxtra/fundamentals
|
d9f35eb040c0e1009aad2f7305c035fda207a147
|
[
"MIT"
] | null | null | null |
data types and variables exerscise/Triples of Latin Letters.py
|
nrgxtra/fundamentals
|
d9f35eb040c0e1009aad2f7305c035fda207a147
|
[
"MIT"
] | null | null | null |
n = int(input())
for i in range(n):
for j in range(n):
for h in range(n):
print(f'{chr(97+i)}{chr(97+j)}{chr(97+h)}')
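# e.g. n = 2 prints the 8 triples aaa, aab, aba, abb, baa, bab, bba, bbb
# (n ** 3 lines in total, drawn from the first n lowercase Latin letters).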
| 18.875
| 56
| 0.456954
|
35616cb99d42189ffb1d878629f3eb605e2c649d
| 9,244
|
py
|
Python
|
nova/tests/unit/virt/vmwareapi/test_network_util.py
|
viveknandavanam/nova
|
556377b6915936467436c9d5bb33bc0e22244e1e
|
[
"Apache-2.0"
] | 1
|
2019-07-29T10:30:24.000Z
|
2019-07-29T10:30:24.000Z
|
nova/tests/unit/virt/vmwareapi/test_network_util.py
|
viveknandavanam/nova
|
556377b6915936467436c9d5bb33bc0e22244e1e
|
[
"Apache-2.0"
] | 11
|
2017-06-19T01:28:55.000Z
|
2017-06-23T02:01:47.000Z
|
nova/tests/unit/virt/vmwareapi/test_network_util.py
|
viveknandavanam/nova
|
556377b6915936467436c9d5bb33bc0e22244e1e
|
[
"Apache-2.0"
] | 7
|
2015-01-20T10:30:08.000Z
|
2020-02-05T10:29:05.000Z
|
# Copyright (c) 2014 VMware, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_vmware import vim_util
from nova import exception
from nova import test
from nova.tests.unit.virt.vmwareapi import fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vm_util
ResultSet = collections.namedtuple('ResultSet', ['objects'])
ObjectContent = collections.namedtuple('ObjectContent', ['obj', 'propSet'])
DynamicProperty = collections.namedtuple('DynamicProperty', ['name', 'val'])
class GetNetworkWithTheNameTestCase(test.NoDBTestCase):
def setUp(self):
super(GetNetworkWithTheNameTestCase, self).setUp()
fake.reset()
self.stub_out('nova.virt.vmwareapi.driver.VMwareAPISession.vim',
stubs.fake_vim_prop)
self.stub_out('nova.virt.vmwareapi.driver.'
'VMwareAPISession.is_vim_object',
stubs.fake_is_vim_object)
self._session = driver.VMwareAPISession()
def _build_cluster_networks(self, networks):
"""Returns a set of results for a cluster network lookup.
This is an example:
(ObjectContent){
obj =
(obj){
value = "domain-c7"
_type = "ClusterComputeResource"
}
propSet[] =
(DynamicProperty){
name = "network"
val =
(ArrayOfManagedObjectReference){
ManagedObjectReference[] =
(ManagedObjectReference){
value = "network-54"
_type = "Network"
},
(ManagedObjectReference){
value = "dvportgroup-14"
_type = "DistributedVirtualPortgroup"
},
}
},
}]
"""
objects = []
obj = ObjectContent(obj=vim_util.get_moref("domain-c7",
"ClusterComputeResource"),
propSet=[])
value = fake.DataObject()
value.ManagedObjectReference = []
for network in networks:
value.ManagedObjectReference.append(network)
obj.propSet.append(
DynamicProperty(name='network',
val=value))
objects.append(obj)
return ResultSet(objects=objects)
def test_get_network_no_match(self):
net_morefs = [vim_util.get_moref("dvportgroup-135",
"DistributedVirtualPortgroup"),
vim_util.get_moref("dvportgroup-136",
"DistributedVirtualPortgroup")]
networks = self._build_cluster_networks(net_morefs)
self._continue_retrieval_called = False
def mock_call_method(module, method, *args, **kwargs):
if method == 'get_object_properties':
return networks
if method == 'get_object_property':
result = fake.DataObject()
result.name = 'no-match'
return result
if method == 'continue_retrieval':
self._continue_retrieval_called = True
with mock.patch.object(self._session, '_call_method',
mock_call_method):
res = network_util.get_network_with_the_name(self._session,
'fake_net',
'fake_cluster')
self.assertTrue(self._continue_retrieval_called)
self.assertIsNone(res)
def _get_network_dvs_match(self, name, token=False):
net_morefs = [vim_util.get_moref("dvportgroup-135",
"DistributedVirtualPortgroup")]
networks = self._build_cluster_networks(net_morefs)
def mock_call_method(module, method, *args, **kwargs):
if method == 'get_object_properties':
return networks
if method == 'get_object_property':
result = fake.DataObject()
if not token or self._continue_retrieval_called:
result.name = name
else:
result.name = 'fake_name'
result.key = 'fake_key'
result.distributedVirtualSwitch = 'fake_dvs'
return result
if method == 'continue_retrieval':
if token:
self._continue_retrieval_called = True
return networks
if method == 'cancel_retrieval':
self._cancel_retrieval_called = True
with mock.patch.object(self._session, '_call_method',
mock_call_method):
res = network_util.get_network_with_the_name(self._session,
'fake_net',
'fake_cluster')
self.assertIsNotNone(res)
def test_get_network_dvs_exact_match(self):
self._cancel_retrieval_called = False
self._get_network_dvs_match('fake_net')
self.assertTrue(self._cancel_retrieval_called)
def test_get_network_dvs_match(self):
self._cancel_retrieval_called = False
self._get_network_dvs_match('dvs_7-virtualwire-7-fake_net')
self.assertTrue(self._cancel_retrieval_called)
def test_get_network_dvs_match_with_token(self):
self._continue_retrieval_called = False
self._cancel_retrieval_called = False
self._get_network_dvs_match('dvs_7-virtualwire-7-fake_net',
token=True)
self.assertTrue(self._continue_retrieval_called)
self.assertTrue(self._cancel_retrieval_called)
def test_get_network_network_match(self):
net_morefs = [vim_util.get_moref("network-54", "Network")]
networks = self._build_cluster_networks(net_morefs)
def mock_call_method(module, method, *args, **kwargs):
if method == 'get_object_properties':
return networks
if method == 'get_object_property':
return 'fake_net'
with mock.patch.object(self._session, '_call_method',
mock_call_method):
res = network_util.get_network_with_the_name(self._session,
'fake_net',
'fake_cluster')
self.assertIsNotNone(res)
class GetVlanIdAndVswitchForPortgroupTestCase(test.NoDBTestCase):
@mock.patch.object(vm_util, 'get_host_ref')
def test_no_port_groups(self, mock_get_host_ref):
session = mock.Mock()
session._call_method.return_value = None
self.assertRaises(
exception.NovaException,
network_util.get_vlanid_and_vswitch_for_portgroup,
session,
'port_group_name',
'fake_cluster'
)
@mock.patch.object(vm_util, 'get_host_ref')
def test_valid_port_group(self, mock_get_host_ref):
session = mock.Mock()
session._call_method.return_value = self._fake_port_groups()
vlanid, vswitch = network_util.get_vlanid_and_vswitch_for_portgroup(
session,
'port_group_name',
'fake_cluster'
)
self.assertEqual(vlanid, 100)
self.assertEqual(vswitch, 'vswitch_name')
@mock.patch.object(vm_util, 'get_host_ref')
def test_unknown_port_group(self, mock_get_host_ref):
session = mock.Mock()
session._call_method.return_value = self._fake_port_groups()
vlanid, vswitch = network_util.get_vlanid_and_vswitch_for_portgroup(
session,
'unknown_port_group',
'fake_cluster'
)
self.assertIsNone(vlanid)
self.assertIsNone(vswitch)
def _fake_port_groups(self):
port_group_spec = fake.DataObject()
port_group_spec.name = 'port_group_name'
port_group_spec.vlanId = 100
port_group_spec.vswitchName = 'vswitch_name'
port_group = fake.DataObject()
port_group.vswitch = 'vswitch_name'
port_group.spec = port_group_spec
response = fake.DataObject()
response.HostPortGroup = [port_group]
return response
| 39.169492
| 78
| 0.583081
|
578bb162ee296a673f6b3c06d996f97d33b8af81
| 680
|
py
|
Python
|
poky/scripts/lib/build_perf/html.py
|
buildlinux/unityos
|
dcbe232d0589013d77a62c33959d6a69f9bfbc5e
|
[
"Apache-2.0"
] | 53
|
2018-02-28T08:51:32.000Z
|
2022-02-28T06:49:23.000Z
|
scripts/lib/build_perf/html.py
|
nareshgbhat/luv-yocto
|
48976c54238dda0791e274927371265d259c0e5a
|
[
"MIT"
] | 27
|
2018-01-25T00:26:53.000Z
|
2020-08-09T05:20:04.000Z
|
scripts/lib/build_perf/html.py
|
nareshgbhat/luv-yocto
|
48976c54238dda0791e274927371265d259c0e5a
|
[
"MIT"
] | 51
|
2018-02-21T04:46:08.000Z
|
2022-03-02T04:20:41.000Z
|
#
# Copyright (c) 2017, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
"""Helper module for HTML reporting"""
from jinja2 import Environment, PackageLoader
env = Environment(loader=PackageLoader('build_perf', 'html'))
template = env.get_template('report.html')
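# Report generation code is expected to render this with something like
# html = template.render(title='Build Perf Report', ...) (assumed context keys).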
| 34
| 75
| 0.769118
|
e5ab6c6697bd5669e2ab509f1546d4224a4444d6
| 4,732
|
py
|
Python
|
osf_tests/management_commands/test_backfill_egap_metadata.py
|
gaybro8777/osf.io
|
30408511510a40bc393565817b343ef5fd76ab14
|
[
"Apache-2.0"
] | 628
|
2015-01-15T04:33:22.000Z
|
2022-03-30T06:40:10.000Z
|
osf_tests/management_commands/test_backfill_egap_metadata.py
|
gaybro8777/osf.io
|
30408511510a40bc393565817b343ef5fd76ab14
|
[
"Apache-2.0"
] | 4,712
|
2015-01-02T01:41:53.000Z
|
2022-03-30T14:18:40.000Z
|
osf_tests/management_commands/test_backfill_egap_metadata.py
|
Johnetordoff/osf.io
|
de10bf249c46cede04c78f7e6f7e352c69e6e6b5
|
[
"Apache-2.0"
] | 371
|
2015-01-12T16:14:08.000Z
|
2022-03-31T18:58:29.000Z
|
import pytest
from osf.management.commands.backfill_egap_provider_metadata import backfill_egap_metadata
from osf.management.commands.update_registration_schemas import update_registration_schemas
from osf.models import RegistrationSchema
from osf_tests.factories import RegistrationFactory, RegistrationProviderFactory
EGAP_ID_SCHEMA_KEY = 'q3'
REGISTRATION_DATE_SCHEMA_KEY = 'q4'
OLDER_EGAP_ID = 'ABC123'
OLDER_TIMESTAMP = '2020-10-04 08:30:00 -0400'
OLDEST_EGAP_ID = 'XYZ789'
OLDEST_TIMESTAMP = '03/01/2011 - 22:00'
@pytest.mark.django_db
class TestMigrateEgapRegistrationMetadata:
@pytest.fixture()
def egap(self):
return RegistrationProviderFactory(_id='egap')
@pytest.fixture()
def older_registration(self, egap):
schema = RegistrationSchema.objects.get(name='EGAP Registration', schema_version=3)
registration = RegistrationFactory(schema=schema)
registration.provider = egap
registration.registration_responses[EGAP_ID_SCHEMA_KEY] = OLDER_EGAP_ID
registration.registration_responses[REGISTRATION_DATE_SCHEMA_KEY] = OLDER_TIMESTAMP
registration.save()
return registration
@pytest.fixture()
def oldest_registration(self, egap):
schema = RegistrationSchema.objects.get(name='EGAP Registration', schema_version=2)
registration = RegistrationFactory(schema=schema)
registration.provider = egap
registration.registration_responses[EGAP_ID_SCHEMA_KEY] = OLDEST_EGAP_ID
registration.registration_responses[REGISTRATION_DATE_SCHEMA_KEY] = OLDEST_TIMESTAMP
registration.save()
return registration
@pytest.fixture()
def newer_registration(self, egap):
try:
schema = RegistrationSchema.objects.get(name='EGAP Registration', schema_version=4)
except Exception:
update_registration_schemas()
schema = RegistrationSchema.objects.get(name='EGAP Registration', schema_version=4)
registration = RegistrationFactory(schema=schema)
registration.provider = egap
registration.save()
return registration
@pytest.fixture()
def non_egap_registration(self):
return RegistrationFactory()
def test_backfill_egap_metadata(
self, newer_registration, older_registration,
oldest_registration, non_egap_registration):
assert older_registration.additional_metadata is None
assert oldest_registration.additional_metadata is None
backfilled_registration_count = backfill_egap_metadata()
assert backfilled_registration_count == 2
newer_registration.refresh_from_db()
older_registration.refresh_from_db()
oldest_registration.refresh_from_db()
non_egap_registration.refresh_from_db()
assert older_registration.additional_metadata['EGAP Registration ID'] == OLDER_EGAP_ID
# Automatically converted to UTC, apparently
expected_older_date_string = '2020-10-04 12:30:00'
assert older_registration.registered_date.strftime('%Y-%m-%d %H:%M:%S') == expected_older_date_string
assert oldest_registration.additional_metadata['EGAP Registration ID'] == OLDEST_EGAP_ID
expected_oldest_date_string = '2011-03-01 22:00:00'
assert oldest_registration.registered_date.strftime('%Y-%m-%d %H:%M:%S') == expected_oldest_date_string
# Should have been excluded based on version
assert newer_registration.additional_metadata is None
        # Should have been excluded based on provider
assert non_egap_registration.additional_metadata is None
def test_backfill_egap_metadata_dry_run(self, older_registration, oldest_registration):
backfill_count = backfill_egap_metadata(dry_run=True)
assert backfill_count == 2
older_registration.refresh_from_db()
oldest_registration.refresh_from_db()
assert older_registration.additional_metadata is None
assert oldest_registration.additional_metadata is None
def test_backfill_egap_metadata_ignores_updated_registrations(
self, older_registration, oldest_registration):
older_registration.additional_metadata = {'EGAP Registration ID': OLDER_EGAP_ID}
older_registration.save()
backfill_count = backfill_egap_metadata()
assert backfill_count == 1
oldest_registration.refresh_from_db()
assert oldest_registration.additional_metadata['EGAP Registration ID'] == OLDEST_EGAP_ID
assert backfill_egap_metadata() == 0
def test_backfill_egap_metadata_batch_size(
self, older_registration, oldest_registration):
assert backfill_egap_metadata(batch_size=1) == 1
| 41.876106
| 111
| 0.744505
|
f321fbc395c4358da192e1bbd761ff86399d3ac9
| 543
|
py
|
Python
|
manage.py
|
seLain/MissAchieve
|
e65ecf46d3c35b79151d526d0b0abce7b55a6652
|
[
"MIT"
] | null | null | null |
manage.py
|
seLain/MissAchieve
|
e65ecf46d3c35b79151d526d0b0abce7b55a6652
|
[
"MIT"
] | 7
|
2018-03-29T05:38:46.000Z
|
2021-06-10T20:09:49.000Z
|
manage.py
|
seLain/MissAchieve
|
e65ecf46d3c35b79151d526d0b0abce7b55a6652
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MissAchieve.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.9375
| 75
| 0.688766
|
15415a1901079c94d72442a3953863ab8d537c35
| 880
|
py
|
Python
|
config.py
|
lyori6/microblog_app
|
335135cebe90e647639a6fc6c659ca26923cc1e3
|
[
"MIT"
] | null | null | null |
config.py
|
lyori6/microblog_app
|
335135cebe90e647639a6fc6c659ca26923cc1e3
|
[
"MIT"
] | 1
|
2019-09-13T16:12:45.000Z
|
2019-09-13T16:12:45.000Z
|
config.py
|
hoquem/microblog
|
b0421a0157ac73cbb4f467838010eb20f888086c
|
[
"MIT"
] | null | null | null |
import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = os.environ.get('MAIL_SERVER')
MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
ADMINS = ['your-email@example.com']
LANGUAGES = ['en', 'es']
MS_TRANSLATOR_KEY = os.environ.get('MS_TRANSLATOR_KEY')
ELASTICSEARCH_URL = os.environ.get('ELASTICSEARCH_URL')
POSTS_PER_PAGE = 25
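# The Flask application is assumed to load this configuration with
# app.config.from_object(Config)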
| 38.26087
| 71
| 0.710227
|
a1e2c99b92ea5a5af22263327dd30e46cfd54d6f
| 69,232
|
py
|
Python
|
pylogix/eip.py
|
zursch/pylogix
|
1d7889454b7db1d00dce7028cb81eae60ebda787
|
[
"Apache-2.0"
] | null | null | null |
pylogix/eip.py
|
zursch/pylogix
|
1d7889454b7db1d00dce7028cb81eae60ebda787
|
[
"Apache-2.0"
] | null | null | null |
pylogix/eip.py
|
zursch/pylogix
|
1d7889454b7db1d00dce7028cb81eae60ebda787
|
[
"Apache-2.0"
] | null | null | null |
'''
Originally created by Burt Peterson
Updated and maintained by Dustin Roeder (dmroeder@gmail.com)
Copyright 2019 Dustin Roeder
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import socket
import sys
import time
from datetime import datetime, timedelta
from .lgxDevice import LGXDevice, GetDevice, GetVendor
from random import randrange
from struct import pack, unpack_from
programNames = []
class PLC:
def __init__(self):
'''
Initialize our parameters
'''
self.IPAddress = ""
self.ProcessorSlot = 0
self.Micro800 = False
self.Port = 44818
self.VendorID = 0x1337
self.Context = 0x00
self.ContextPointer = 0
self.Socket = socket.socket()
self.SocketConnected = False
        self.OTNetworkConnectionID = None
self.SessionHandle = 0x0000
self.SessionRegistered = False
self.SerialNumber = 0
self.OriginatorSerialNumber = 42
self.SequenceCounter = 1
self.ConnectionSize = 508
self.Offset = 0
self.KnownTags = {}
self.TagList = []
self.StructIdentifier = 0x0fCE
        self.CIPTypes = {160:(88, "STRUCT", 'B'),
193:(1, "BOOL", '?'),
194:(1, "SINT", 'b'),
195:(2, "INT", 'h'),
196:(4, "DINT", 'i'),
197:(8, "LINT", 'q'),
198:(1, "USINT", 'B'),
199:(2, "UINT", 'H'),
200:(4, "UDINT", 'I'),
201:(8, "LWORD", 'Q'),
202:(4, "REAL", 'f'),
203:(8, "LREAL", 'd'),
211:(4, "DWORD", 'I'),
218:(0, "STRING", 'B')}
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
'''
Clean up on exit
'''
return self._closeConnection()
def Read(self, tag, count=1, datatype=None):
'''
We have two options for reading depending on
the arguments, read a single tag, or read an array
'''
if isinstance(tag, (list, tuple)):
if len(tag) == 1:
return [ self._readTag(tag[0], count, datatype) ]
if datatype:
raise TypeError('Datatype should be set to None when reading lists')
return self._multiRead(tag)
else:
return self._readTag(tag, count, datatype)
def Write(self, tag, value, datatype=None):
'''
We have two options for writing depending on
the arguments, write a single tag, or write an array
'''
return self._writeTag(tag, value, datatype)
def MultiRead(self, tags):
'''
Read multiple tags in one request
'''
return self._multiRead(tags)
def GetPLCTime(self, raw=False):
'''
Get the PLC's clock time, return as human readable (default) or raw if raw=True
'''
return self._getPLCTime(raw)
def SetPLCTime(self):
'''
Sets the PLC's clock time
'''
return self._setPLCTime()
def GetTagList(self, allTags = True):
'''
Retrieves the tag list from the PLC
Optional parameter allTags set to True
If is set to False, it will return only controller
otherwise controller tags and program tags.
'''
if allTags:
self._getTagList()
self._getAllProgramsTags()
else:
self._getTagList()
self._getUDT()
return self.TagList
def GetProgramTagList(self, programName):
'''
Retrieves a program tag list from the PLC
programName = "Program:ExampleProgram"
'''
# Ensure programNames is not empty
if not programNames:
self._getTagList()
        # Get a single program's tags if programName exists
if programName in programNames:
self._getProgramTagList(programName)
self._getUDT()
return self.TagList
if programName not in programNames:
print("Program not found, please check name!")
return None
def GetProgramsList(self):
'''
Retrieves a program names list from the PLC
Sanity check: checks if programNames is empty
and runs _getTagList
'''
if not programNames:
self._getTagList()
return programNames
def Discover(self):
'''
Query all the EIP devices on the network
'''
return self._discover()
def GetModuleProperties(self, slot):
'''
Get the properties of module in specified slot
'''
return self._getModuleProperties(slot)
def Close(self):
'''
Close the connection to the PLC
'''
return self._closeConnection()
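    # Usage sketch (assumed IP address and tag names):
    #   with PLC() as comm:
    #       comm.IPAddress = '192.168.1.9'
    #       value = comm.Read('MyTag')
    #       comm.Write('MyDint', 42)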
def _readTag(self, tag, elements, dt):
'''
processes the read request
'''
self.Offset = 0
if not self._connect(): return None
t,b,i = _parseTagName(tag, 0)
self._initial_read(t, b, dt)
datatype = self.KnownTags[b][0]
bitCount = self.CIPTypes[datatype][0] * 8
if datatype == 211:
# bool array
tagData = self._buildTagIOI(tag, isBoolArray=True)
words = _getWordCount(i, elements, bitCount)
readRequest = self._addReadIOI(tagData, words)
elif BitofWord(t):
# bits of word
split_tag = tag.split('.')
bitPos = split_tag[len(split_tag)-1]
bitPos = int(bitPos)
tagData = self._buildTagIOI(tag, isBoolArray=False)
words = _getWordCount(bitPos, elements, bitCount)
readRequest = self._addReadIOI(tagData, words)
else:
# everything else
tagData = self._buildTagIOI(tag, isBoolArray=False)
readRequest = self._addReadIOI(tagData, elements)
eipHeader = self._buildEIPHeader(readRequest)
status, retData = self._getBytes(eipHeader)
if status == 0 or status == 6:
return self._parseReply(tag, elements, retData)
else:
if status in cipErrorCodes.keys():
err = cipErrorCodes[status]
else:
err = 'Unknown error {}'.format(status)
raise ValueError('Read failed: {}'.format(err))
def _writeTag(self, tag, value, dt):
'''
Processes the write request
'''
self.Offset = 0
writeData = []
if not self._connect(): return None
t,b,i = _parseTagName(tag, 0)
self._initial_read(t, b, dt)
dataType = self.KnownTags[b][0]
# check if values passed were a list
if isinstance(value, list):
elements = len(value)
else:
elements = 1
value = [value]
for v in value:
if dataType == 202 or dataType == 203:
writeData.append(float(v))
elif dataType == 160 or dataType == 218:
writeData.append(self._makeString(v))
else:
writeData.append(int(v))
# write a bit of a word, boolean array or everything else
if BitofWord(tag):
tagData = self._buildTagIOI(tag, isBoolArray=False)
writeRequest = self._addWriteBitIOI(tag, tagData, writeData, dataType)
elif dataType == 211:
tagData = self._buildTagIOI(tag, isBoolArray=True)
writeRequest = self._addWriteBitIOI(tag, tagData, writeData, dataType)
else:
tagData = self._buildTagIOI(tag, isBoolArray=False)
writeRequest = self._addWriteIOI(tagData, writeData, dataType)
eipHeader = self._buildEIPHeader(writeRequest)
status, retData = self._getBytes(eipHeader)
if status == 0:
return
else:
if status in cipErrorCodes.keys():
err = cipErrorCodes[status]
else:
err = 'Unknown error {}'.format(status)
raise ValueError('Write Failed: {}'.format(err))
def _multiRead(self, tags):
'''
Processes the multiple read request
'''
serviceSegments = []
segments = b""
tagCount = len(tags)
self.Offset = 0
if not self._connect(): return None
for tag in tags:
if isinstance(tag, (list, tuple)):
tag_name, base, ind = _parseTagName(tag[0], 0)
self._initial_read(tag_name, base, tag[1])
else:
tag_name, base, ind = _parseTagName(tag, 0)
self._initial_read(tag_name, base, None)
dataType = self.KnownTags[base][0]
if dataType == 211:
tagIOI = self._buildTagIOI(tag_name, isBoolArray=True)
else:
tagIOI = self._buildTagIOI(tag_name, isBoolArray=False)
readIOI = self._addReadIOI(tagIOI, 1)
serviceSegments.append(readIOI)
header = self._buildMultiServiceHeader()
segmentCount = pack('<H', tagCount)
temp = len(header)
if tagCount > 2:
temp += (tagCount-2)*2
offsets = pack('<H', temp)
# assemble all the segments
for i in range(tagCount):
segments += serviceSegments[i]
for i in range(tagCount-1):
temp += len(serviceSegments[i])
offsets += pack('<H', temp)
readRequest = header+segmentCount+offsets+segments
eipHeader = self._buildEIPHeader(readRequest)
status, retData = self._getBytes(eipHeader)
if status == 0:
return self._multiParser(tags, retData)
else:
if status in cipErrorCodes.keys():
err = cipErrorCodes[status]
else:
err = 'Unknown error {}'.format(status)
raise ValueError('Multi-read failed: {}'.format(err))
def _getPLCTime(self, raw=False):
'''
Requests the PLC clock time
'''
if not self._connect(): return None
AttributeService = 0x03
AttributeSize = 0x02
AttributeClassType = 0x20
AttributeClass = 0x8B
AttributeInstanceType = 0x24
AttributeInstance = 0x01
AttributeCount = 0x01
TimeAttribute = 0x0B
AttributePacket = pack('<BBBBBBH1H',
AttributeService,
AttributeSize,
AttributeClassType,
AttributeClass,
AttributeInstanceType,
AttributeInstance,
AttributeCount,
TimeAttribute)
eipHeader = self._buildEIPHeader(AttributePacket)
status, retData = self._getBytes(eipHeader)
if status == 0:
# get the time from the packet
plcTime = unpack_from('<Q', retData, 56)[0]
if raw:
return plcTime
humanTime = datetime(1970, 1, 1) + timedelta(microseconds=plcTime)
return humanTime
else:
if status in cipErrorCodes.keys():
err = cipErrorCodes[status]
else:
err = 'Unknown error {}'.format(status)
raise ValueError('Failed to get PLC time: {}'.format(err))
def _setPLCTime(self):
'''
        Sets the PLC's clock time
'''
if not self._connect(): return None
AttributeService = 0x04
AttributeSize = 0x02
AttributeClassType = 0x20
AttributeClass = 0x8B
AttributeInstanceType = 0x24
AttributeInstance = 0x01
AttributeCount = 0x01
Attribute = 0x06
Time = int(time.time() * 1000000)
AttributePacket = pack('<BBBBBBHHQ',
AttributeService,
AttributeSize,
AttributeClassType,
AttributeClass,
AttributeInstanceType,
AttributeInstance,
AttributeCount,
Attribute,
Time)
eipHeader = self._buildEIPHeader(AttributePacket)
status, retData = self._getBytes(eipHeader)
if status == 0:
return
else:
if status in cipErrorCodes.keys():
err = cipErrorCodes[status]
else:
err = 'Unknown error {}'.format(status)
raise ValueError('Failed to set PLC time: {}'.format(err))
def _getTagList(self):
'''
Requests the controller tag list and returns a list of LgxTag type
'''
if not self._connect(): return None
self.Offset = 0
del programNames[:]
del self.TagList[:]
request = self._buildTagListRequest(programName=None)
eipHeader = self._buildEIPHeader(request)
status, retData = self._getBytes(eipHeader)
if status == 0 or status == 6:
self._extractTagPacket(retData, programName=None)
else:
if status in cipErrorCodes.keys():
err = cipErrorCodes[status]
else:
err = 'Unknown error {}'.format(status)
raise ValueError('Failed to get tag list {}'.format(err))
while status == 6:
self.Offset += 1
request = self._buildTagListRequest(programName=None)
eipHeader = self._buildEIPHeader(request)
status, retData = self._getBytes(eipHeader)
if status == 0 or status == 6:
self._extractTagPacket(retData, programName=None)
else:
if status in cipErrorCodes.keys():
err = cipErrorCodes[status]
else:
err = 'Unknown error {}'.format(status)
raise ValueError('Failed to get tag list: {}'.format(err))
return
def _getAllProgramsTags(self):
'''
Requests all programs tag list and appends to taglist (LgxTag type)
'''
if not self._connect(): return None
self.Offset = 0
for programName in programNames:
self.Offset = 0
request = self._buildTagListRequest(programName)
eipHeader = self._buildEIPHeader(request)
status, retData = self._getBytes(eipHeader)
if status == 0 or status == 6:
self._extractTagPacket(retData, programName)
else:
if status in cipErrorCodes.keys():
err = cipErrorCodes[status]
else:
err = 'Unknown error {}'.format(status)
raise ValueError('Failed to get program tag list: {}'.format(err))
while status == 6:
self.Offset += 1
request = self._buildTagListRequest(programName)
eipHeader = self._buildEIPHeader(request)
status, retData = self._getBytes(eipHeader)
if status == 0 or status == 6:
self._extractTagPacket(retData, programName)
else:
if status in cipErrorCodes.keys():
err = cipErrorCodes[status]
else:
err = 'Unknown error {}'.format(status)
raise ValueError('Failed to get program tag list: {}'.format(err))
return
def _getProgramTagList(self, programName):
'''
Requests tag list for a specific program and returns a list of LgxTag type
'''
if not self._connect(): return None
self.Offset = 0
del self.TagList[:]
request = self._buildTagListRequest(programName)
eipHeader = self._buildEIPHeader(request)
status, retData = self._getBytes(eipHeader)
if status == 0 or status == 6:
self._extractTagPacket(retData, programName)
else:
if status in cipErrorCodes.keys():
err = cipErrorCodes[status]
else:
err = 'Unknown error {}'.format(status)
raise ValueError('Failed to get program tag list: {}'.format(err))
while status == 6:
self.Offset += 1
request = self._buildTagListRequest(programName)
eipHeader = self._buildEIPHeader(request)
status, retData = self._getBytes(eipHeader)
if status == 0 or status == 6:
self._extractTagPacket(retData, programName)
else:
if status in cipErrorCodes.keys():
err = cipErrorCodes[status]
else:
err = 'Unknown error {}'.format(status)
raise ValueError('Failed to get program tag list: {}'.format(err))
return
def _getUDT(self):
# get only tags that are a struct
struct_tags = [x for x in self.TagList if x.Struct == 1]
# reduce our struct tag list to only unique instances
seen = set()
unique = [obj for obj in struct_tags if obj.DataTypeValue not in seen and not seen.add(obj.DataTypeValue)]
template = {}
for u in unique:
temp = self._getTemplateAttribute(u.DataTypeValue)
val = unpack_from('<I', temp[46:], 10)[0]
words = (val * 4) - 23
member_count = int(unpack_from('<H', temp[46:], 24)[0])
template[u.DataTypeValue] = [words, '', member_count]
for key,value in template.items():
t = self._getTemplate(key, value[0])
size = value[2] * 8
p = t[50:]
member_bytes = p[size:]
split_char = pack('<b', 0x00)
members = member_bytes.split(split_char)
split_char = pack('<b', 0x3b)
name = members[0].split(split_char)[0]
template[key][1] = str(name.decode('utf-8'))
for tag in self.TagList:
if tag.DataTypeValue in template:
tag.DataType = template[tag.DataTypeValue][1]
elif tag.SymbolType in self.CIPTypes:
tag.DataType = self.CIPTypes[tag.SymbolType][1]
return
def _getTemplateAttribute(self, instance):
'''
Get the attributes of a UDT
'''
if not self._connect(): return None
readRequest = self._buildTemplateAttributes(instance)
eipHeader = self._buildEIPHeader(readRequest)
status, retData = self._getBytes(eipHeader)
return retData
def _getTemplate(self, instance, dataLen):
'''
        Get the members of a UDT so its structure can be parsed
'''
if not self._connect(): return None
readRequest = self._readTemplateService(instance, dataLen)
eipHeader = self._buildEIPHeader(readRequest)
status, retData = self._getBytes(eipHeader)
return retData
def _buildTemplateAttributes(self, instance):
TemplateService = 0x03
TemplateLength = 0x03
TemplateClassType = 0x20
TemplateClass = 0x6c
TemplateInstanceType = 0x25
TemplateInstance = instance
AttribCount = 0x04
Attrib4 = 0x04
Attrib3 = 0x03
Attrib2 = 0x02
Attrib1 = 0x01
return pack('<BBBBHHHHHHH',
TemplateService,
TemplateLength,
TemplateClassType,
TemplateClass,
TemplateInstanceType,
TemplateInstance,
AttribCount,
Attrib4,
Attrib3,
Attrib2,
Attrib1)
def _readTemplateService(self, instance, dataLen):
TemplateService = 0x4c
TemplateLength = 0x03
TemplateClassType = 0x20
TemplateClass = 0x6c
TemplateInstanceType = 0x25
TemplateInstance = instance
TemplateOffset = 0x00
DataLength = dataLen
return pack('<BBBBHHIH',
TemplateService,
TemplateLength,
TemplateClassType,
TemplateClass,
TemplateInstanceType,
TemplateInstance,
TemplateOffset,
DataLength)
def _discover(self):
devices = []
request = self._buildListIdentity()
# get available ip addresses
addresses = socket.getaddrinfo(socket.gethostname(), None)
# we're going to send a request for all available ipv4
# addresses and build a list of all the devices that reply
for ip in addresses:
if ip[0] == 2: # IP v4
# create a socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.settimeout(0.5)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.bind((ip[4][0], 0))
s.sendto(request, ('255.255.255.255', 44818))
try:
while(1):
ret = s.recv(4096)
context = unpack_from('<Q', ret, 14)[0]
if context == 0x006d6f4d6948:
device = _parseIdentityResponse(ret)
if device.IPAddress:
devices.append(device)
except:
pass
        # Looping through the bound addresses above doesn't always work on
        # Linux, so as a fallback: if we got no results, try one more time
        # without binding to an address.
if len(devices) == 0:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.settimeout(0.5)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.sendto(request, ('255.255.255.255', 44818))
try:
while(1):
ret = s.recv(4096)
context = unpack_from('<Q', ret, 14)[0]
if context == 0x006d6f4d6948:
device = _parseIdentityResponse(ret)
if device.IPAddress:
devices.append(device)
except:
pass
return devices
def _getModuleProperties(self, slot):
'''
Request the properties of a module in a particular
slot. Returns LgxDevice
'''
if not self._connect(): return None
AttributeService = 0x01
AttributeSize = 0x02
AttributeClassType = 0x20
AttributeClass = 0x01
AttributeInstanceType = 0x24
AttributeInstance = 0x01
PathRouteSize = 0x01
Reserved = 0x00
Backplane = 0x01
LinkAddress = slot
AttributePacket = pack('<10B',
AttributeService,
AttributeSize,
AttributeClassType,
AttributeClass,
AttributeInstanceType,
AttributeInstance,
PathRouteSize,
Reserved,
Backplane,
LinkAddress)
frame = self._buildCIPUnconnectedSend() + AttributePacket
eipHeader = self._buildEIPSendRRDataHeader(len(frame)) + frame
pad = pack('<I', 0x00)
self.Socket.send(eipHeader)
retData = pad + self.recv_data()
status = unpack_from('<B', retData, 46)[0]
if status == 0:
return _parseIdentityResponse(retData)
else:
return LGXDevice()
def _connect(self):
'''
Open a connection to the PLC
'''
if self.SocketConnected:
return True
# Make sure the connection size is correct
if not 500 <= self.ConnectionSize <= 4000:
raise ValueError("ConnectionSize must be an integer between 500 and 4000")
try:
self.Socket = socket.socket()
self.Socket.settimeout(5.0)
self.Socket.connect((self.IPAddress, self.Port))
except(socket.error):
self.SocketConnected = False
self.SequenceCounter = 1
self.Socket.close()
raise
self.Socket.send(self._buildRegisterSession())
retData = self.recv_data()
if retData:
self.SessionHandle = unpack_from('<I', retData, 4)[0]
else:
self.SocketConnected = False
raise Exception("Failed to register session")
self.Socket.send(self._buildForwardOpenPacket())
retData = self.recv_data()
sts = unpack_from('<b', retData, 42)[0]
if not sts:
self.OTNetworkConnectionID = unpack_from('<I', retData, 44)[0]
self.SocketConnected = True
else:
self.SocketConnected = False
raise Exception("Forward Open Failed")
return True
def _closeConnection(self):
'''
Close the connection to the PLC (forward close, unregister session)
'''
self.SocketConnected = False
close_packet = self._buildForwardClosePacket()
unreg_packet = self._buildUnregisterSession()
try:
self.Socket.send(close_packet)
self.Socket.send(unreg_packet)
self.Socket.close()
except:
self.Socket.close()
finally:
pass
def _getBytes(self, data):
'''
Sends data and gets the return data
'''
try:
self.Socket.send(data)
retData = self.recv_data()
if retData:
status = unpack_from('<B', retData, 48)[0]
return status, retData
else:
return 1, None
except (socket.gaierror):
self.SocketConnected = False
return 1, None
except (IOError):
self.SocketConnected = False
return 7, None
def _buildRegisterSession(self):
'''
Register our CIP connection
'''
EIPCommand = 0x0065
EIPLength = 0x0004
EIPSessionHandle = self.SessionHandle
EIPStatus = 0x0000
EIPContext = self.Context
EIPOptions = 0x0000
EIPProtocolVersion = 0x01
EIPOptionFlag = 0x00
return pack('<HHIIQIHH',
EIPCommand,
EIPLength,
EIPSessionHandle,
EIPStatus,
EIPContext,
EIPOptions,
EIPProtocolVersion,
EIPOptionFlag)
def _buildUnregisterSession(self):
EIPCommand = 0x66
EIPLength = 0x0
EIPSessionHandle = self.SessionHandle
EIPStatus = 0x0000
EIPContext = self.Context
EIPOptions = 0x0000
return pack('<HHIIQI',
EIPCommand,
EIPLength,
EIPSessionHandle,
EIPStatus,
EIPContext,
EIPOptions)
def _buildForwardOpenPacket(self):
'''
Assemble the forward open packet
'''
forwardOpen = self._buildCIPForwardOpen()
rrDataHeader = self._buildEIPSendRRDataHeader(len(forwardOpen))
return rrDataHeader+forwardOpen
def _buildForwardClosePacket(self):
'''
Assemble the forward close packet
'''
forwardClose = self._buildForwardClose()
rrDataHeader = self._buildEIPSendRRDataHeader(len(forwardClose))
return rrDataHeader + forwardClose
def _buildCIPForwardOpen(self):
'''
        Forward Open happens after a connection is made;
        this sets up the CIP connection parameters.
'''
CIPPathSize = 0x02
CIPClassType = 0x20
CIPClass = 0x06
CIPInstanceType = 0x24
CIPInstance = 0x01
CIPPriority = 0x0A
CIPTimeoutTicks = 0x0e
CIPOTConnectionID = 0x20000002
CIPTOConnectionID = 0x20000001
self.SerialNumber = randrange(65000)
CIPConnectionSerialNumber = self.SerialNumber
CIPVendorID = self.VendorID
CIPOriginatorSerialNumber = self.OriginatorSerialNumber
CIPMultiplier = 0x03
CIPOTRPI = 0x00201234
CIPConnectionParameters = 0x4200
CIPTORPI = 0x00204001
CIPTransportTrigger = 0xA3
# decide whether to use the standard ForwardOpen
# or the large format
if self.ConnectionSize <= 511:
CIPService = 0x54
CIPConnectionParameters += self.ConnectionSize
pack_format = '<BBBBBBBBIIHHIIIHIHB'
else:
CIPService = 0x5B
CIPConnectionParameters = CIPConnectionParameters << 16
CIPConnectionParameters += self.ConnectionSize
pack_format = '<BBBBBBBBIIHHIIIIIIB'
CIPOTNetworkConnectionParameters = CIPConnectionParameters
CIPTONetworkConnectionParameters = CIPConnectionParameters
ForwardOpen = pack(pack_format,
CIPService,
CIPPathSize,
CIPClassType,
CIPClass,
CIPInstanceType,
CIPInstance,
CIPPriority,
CIPTimeoutTicks,
CIPOTConnectionID,
CIPTOConnectionID,
CIPConnectionSerialNumber,
CIPVendorID,
CIPOriginatorSerialNumber,
CIPMultiplier,
CIPOTRPI,
CIPOTNetworkConnectionParameters,
CIPTORPI,
CIPTONetworkConnectionParameters,
CIPTransportTrigger)
# add the connection path
if self.Micro800:
ConnectionPath = [0x20, 0x02, 0x24, 0x01]
else:
ConnectionPath = [0x01, self.ProcessorSlot, 0x20, 0x02, 0x24, 0x01]
ConnectionPathSize = int(len(ConnectionPath)/2)
pack_format = '<B' + str(len(ConnectionPath)) + 'B'
CIPConnectionPath = pack(pack_format, ConnectionPathSize, *ConnectionPath)
return ForwardOpen + CIPConnectionPath
def _buildForwardClose(self):
'''
Forward Close packet for closing the connection
'''
CIPService = 0x4E
CIPPathSize = 0x02
CIPClassType = 0x20
CIPClass = 0x06
CIPInstanceType = 0x24
CIPInstance = 0x01
CIPPriority = 0x0A
CIPTimeoutTicks = 0x0e
CIPConnectionSerialNumber = self.SerialNumber
CIPVendorID = self.VendorID
CIPOriginatorSerialNumber = self.OriginatorSerialNumber
ForwardClose = pack('<BBBBBBBBHHI',
CIPService,
CIPPathSize,
CIPClassType,
CIPClass,
CIPInstanceType,
CIPInstance,
CIPPriority,
CIPTimeoutTicks,
CIPConnectionSerialNumber,
CIPVendorID,
CIPOriginatorSerialNumber)
# add the connection path
if self.Micro800:
ConnectionPath = [0x20, 0x02, 0x24, 0x01]
else:
ConnectionPath = [0x01, self.ProcessorSlot, 0x20, 0x02, 0x24, 0x01]
ConnectionPathSize = int(len(ConnectionPath)/2)
pack_format = '<H' + str(len(ConnectionPath)) + 'B'
CIPConnectionPath = pack(pack_format, ConnectionPathSize, *ConnectionPath)
return ForwardClose + CIPConnectionPath
def _buildEIPSendRRDataHeader(self, frameLen):
EIPCommand = 0x6F
EIPLength = 16+frameLen
EIPSessionHandle = self.SessionHandle
EIPStatus = 0x00
EIPContext = self.Context
EIPOptions = 0x00
EIPInterfaceHandle = 0x00
EIPTimeout = 0x00
EIPItemCount = 0x02
EIPItem1Type = 0x00
EIPItem1Length = 0x00
EIPItem2Type = 0xB2
EIPItem2Length = frameLen
return pack('<HHIIQIIHHHHHH',
EIPCommand,
EIPLength,
EIPSessionHandle,
EIPStatus,
EIPContext,
EIPOptions,
EIPInterfaceHandle,
EIPTimeout,
EIPItemCount,
EIPItem1Type,
EIPItem1Length,
EIPItem2Type,
EIPItem2Length)
def _buildCIPUnconnectedSend(self):
'''
build unconnected send to request tag database
'''
CIPService = 0x52
CIPPathSize = 0x02
CIPClassType = 0x20
CIPClass = 0x06
CIPInstanceType = 0x24
CIPInstance = 0x01
CIPPriority = 0x0A
CIPTimeoutTicks = 0x0e
ServiceSize = 0x06
return pack('<BBBBBBBBH',
CIPService,
CIPPathSize,
CIPClassType,
CIPClass,
CIPInstanceType,
CIPInstance,
CIPPriority,
CIPTimeoutTicks,
ServiceSize)
def _buildTagIOI(self, tagName, isBoolArray):
'''
The tag IOI is basically the tag name assembled into
an array of bytes structured in a way that the PLC will
understand. It's a little crazy, but we have to consider the
many variations that a tag can be:
TagName (DINT)
TagName.1 (Bit of DINT)
TagName.Thing (UDT)
TagName[4].Thing[2].Length (more complex UDT)
        We might also be reading arrays, a single BOOL from an atomic array,
        strings, multi-dimensional arrays, or program-scoped tags.
'''
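        # Illustrative sketch (not from the original comments): for a tag such as
        # 'Program:MainProgram.Counts[2].ACC' the loop below emits, roughly,
        #   0x91 <len> 'Program:MainProgram'   symbolic segment (padded to even length)
        #   0x91 <len> 'Counts'  0x28 0x02     symbolic segment plus 8-bit element index
        #   0x91 <len> 'ACC'                   symbolic segment
        # Larger indexes use the 0x29 (16-bit) or 0x2A (32-bit) element segment forms.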
RequestTagData = b""
tagArray = tagName.split(".")
# this loop figures out the packet length and builds our packet
for i in range(len(tagArray)):
if tagArray[i].endswith("]"):
tag, basetag, index = _parseTagName(tagArray[i], 0)
BaseTagLenBytes = len(basetag)
if isBoolArray and i == len(tagArray)-1: index = int(index/32)
# Assemble the packet
RequestTagData += pack('<BB', 0x91, BaseTagLenBytes)
RequestTagData += basetag.encode('utf-8')
if BaseTagLenBytes%2:
BaseTagLenBytes += 1
RequestTagData += pack('<B', 0x00)
BaseTagLenWords = BaseTagLenBytes/2
if i < len(tagArray):
if not isinstance(index, list):
if index < 256:
RequestTagData += pack('<BB', 0x28, index)
if 65536 > index > 255:
RequestTagData += pack('<HH', 0x29, index)
if index > 65535:
RequestTagData += pack('<HI', 0x2A, index)
else:
for i in range(len(index)):
if index[i] < 256:
RequestTagData += pack('<BB', 0x28, index[i])
if 65536 > index[i] > 255:
RequestTagData += pack('<HH', 0x29, index[i])
if index[i] > 65535:
RequestTagData += pack('<HI', 0x2A, index[i])
else:
'''
                For a non-array segment of the tag: if this portion converts to an
                integer, the caller is addressing a bit of a word rather than a UDT
                member, so we skip adding a symbolic segment and read the whole word;
                the individual bit is extracted later in the read routine.
'''
try:
if int(tagArray[i]) <= 31:
pass
except:
BaseTagLenBytes = int(len(tagArray[i]))
RequestTagData += pack('<BB', 0x91, BaseTagLenBytes)
RequestTagData += tagArray[i].encode('utf-8')
if BaseTagLenBytes%2:
BaseTagLenBytes += 1
RequestTagData += pack('<B', 0x00)
return RequestTagData
def _addReadIOI(self, tagIOI, elements):
'''
Add the read service to the tagIOI
'''
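        # Example (illustrative): for the tag 'Test' and one element this produces
        # 4C 03 91 04 'Test' 01 00, i.e. service 0x4C, the path size in words, the
        # symbolic path, then the element count as a little-endian word.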
RequestService = 0x4C
RequestPathSize = int(len(tagIOI)/2)
readIOI = pack('<BB', RequestService, RequestPathSize)
readIOI += tagIOI
readIOI += pack('<H', int(elements))
return readIOI
def _addPartialReadIOI(self, tagIOI, elements):
'''
Add the partial read service to the tag IOI
'''
RequestService = 0x52
RequestPathSize = int(len(tagIOI)/2)
readIOI = pack('<BB', RequestService, RequestPathSize)
readIOI += tagIOI
readIOI += pack('<H', int(elements))
readIOI += pack('<I', self.Offset)
return readIOI
def _addWriteIOI(self, tagIOI, writeData, dataType):
'''
        Add the write service request to the tagIOI
'''
elementSize = self.CIPTypes[dataType][0]
dataLen = len(writeData)
NumberOfBytes = elementSize*dataLen
RequestNumberOfElements = dataLen
RequestPathSize = int(len(tagIOI)/2)
RequestService = 0x4D
CIPWriteRequest = pack('<BB', RequestService, RequestPathSize)
CIPWriteRequest += tagIOI
if dataType == 160:
RequestNumberOfElements = self.StructIdentifier
TypeCodeLen = 0x02
CIPWriteRequest += pack('<BBHH', dataType, TypeCodeLen, RequestNumberOfElements, len(writeData))
else:
TypeCodeLen = 0x00
CIPWriteRequest += pack('<BBH', dataType, TypeCodeLen, RequestNumberOfElements)
for v in writeData:
try:
for i in range(len(v)):
el = v[i]
CIPWriteRequest += pack(self.CIPTypes[dataType][2],el)
except:
CIPWriteRequest += pack(self.CIPTypes[dataType][2],v)
return CIPWriteRequest
def _addWriteBitIOI(self, tag, tagIOI, writeData, dataType):
'''
        This will add the bit-level request to the tagIOI.
        Writing to a bit is handled differently than
        other writes.
'''
elementSize = self.CIPTypes[dataType][0]
dataLen = len(writeData)
NumberOfBytes = elementSize*dataLen
RequestNumberOfElements = dataLen
RequestPathSize = int(len(tagIOI)/2)
RequestService = 0x4E
writeIOI = pack('<BB', RequestService, RequestPathSize)
writeIOI += tagIOI
fmt = self.CIPTypes[dataType][2]
fmt = fmt.upper()
s = tag.split('.')
if dataType == 211:
t = s[len(s)-1]
tag, basetag, bit = _parseTagName(t, 0)
bit %= 32
else:
bit = s[len(s)-1]
bit = int(bit)
writeIOI += pack('<h', NumberOfBytes)
byte = 2**(NumberOfBytes*8)-1
bits = 2**bit
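        # Worked example (illustrative): writing True to bit 3 of a DINT sends an OR
        # mask of 0x00000008 with an AND mask of 0xFFFFFFFF; writing False sends an OR
        # mask of 0 with an AND mask of 0xFFFFFFF7 (a CIP read-modify-write).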
if writeData[0]:
writeIOI += pack(fmt, bits)
writeIOI += pack(fmt, byte)
else:
writeIOI += pack(fmt, 0x00)
writeIOI += pack(fmt, (byte-bits))
return writeIOI
def _buildEIPHeader(self, tagIOI):
'''
The EIP Header contains the tagIOI and the
commands to perform the read or write. This request
will be followed by the reply containing the data
'''
if self.ContextPointer == 155: self.ContextPointer = 0
EIPPayloadLength = 22+len(tagIOI)
EIPConnectedDataLength = len(tagIOI)+2
EIPCommand = 0x70
EIPLength = 22+len(tagIOI)
EIPSessionHandle = self.SessionHandle
EIPStatus = 0x00
EIPContext=context_dict[self.ContextPointer]
self.ContextPointer+=1
EIPOptions = 0x0000
EIPInterfaceHandle = 0x00
EIPTimeout = 0x00
EIPItemCount = 0x02
EIPItem1ID = 0xA1
EIPItem1Length = 0x04
EIPItem1 = self.OTNetworkConnectionID
EIPItem2ID = 0xB1
EIPItem2Length = EIPConnectedDataLength
EIPSequence = self.SequenceCounter
self.SequenceCounter += 1
self.SequenceCounter = self.SequenceCounter%0x10000
EIPHeaderFrame = pack('<HHIIQIIHHHHIHHH',
EIPCommand,
EIPLength,
EIPSessionHandle,
EIPStatus,
EIPContext,
EIPOptions,
EIPInterfaceHandle,
EIPTimeout,
EIPItemCount,
EIPItem1ID,
EIPItem1Length,
EIPItem1,
EIPItem2ID,EIPItem2Length,EIPSequence)
return EIPHeaderFrame+tagIOI
def _buildMultiServiceHeader(self):
'''
Service header for making a multiple tag request
'''
        MultiService = 0x0A
        MultiPathSize = 0x02
        MultiClassType = 0x20
        MultiClassSegment = 0x02
        MultiInstanceType = 0x24
        MultiInstanceSegment = 0x01
        return pack('<BBBBBB',
                    MultiService,
                    MultiPathSize,
                    MultiClassType,
                    MultiClassSegment,
                    MultiInstanceType,
                    MultiInstanceSegment)
def _buildTagListRequest(self, programName):
'''
        Build the request for the PLC tag list.
        Program-scoped tags pass the program name with the request.
'''
Service = 0x55
PathSegment = b""
        # If we're dealing with program-scoped tags...
if programName:
PathSegment = pack('<BB', 0x91, len(programName)) + programName.encode('utf-8')
# if odd number of characters, need to add a byte to the end.
if len(programName) % 2: PathSegment += pack('<B', 0x00)
PathSegment += pack('<H', 0x6B20)
if self.Offset < 256:
PathSegment += pack('<BB', 0x24, self.Offset)
else:
PathSegment += pack('<HH', 0x25, self.Offset)
PathSegmentLen = int(len(PathSegment)/2)
AttributeCount = 0x03
SymbolType = 0x02
ByteCount = 0x08
SymbolName = 0x01
Attributes = pack('<HHHH', AttributeCount, SymbolName, SymbolType, ByteCount)
TagListRequest = pack('<BB', Service, PathSegmentLen)
TagListRequest += PathSegment + Attributes
return TagListRequest
def _parseReply(self, tag, elements, data):
'''
        Gets the replies from the PLC.
        In the case of BOOL arrays and bits of
        a word, we do some reformatting.
'''
tagName, basetag, index = _parseTagName(tag, 0)
datatype = self.KnownTags[basetag][0]
bitCount = self.CIPTypes[datatype][0] * 8
# if bit of word was requested
if BitofWord(tag):
split_tag = tag.split('.')
bitPos = split_tag[len(split_tag)-1]
bitPos = int(bitPos)
wordCount = _getWordCount(bitPos, elements, bitCount)
words = self._getReplyValues(tag, wordCount, data)
vals = self._wordsToBits(tag, words, count=elements)
elif datatype == 211:
wordCount = _getWordCount(index, elements, bitCount)
words = self._getReplyValues(tag, wordCount, data)
vals = self._wordsToBits(tag, words, count=elements)
else:
vals = self._getReplyValues(tag, elements, data)
if len(vals) == 1:
return vals[0]
else:
return vals
def _getReplyValues(self, tag, elements, data):
'''
Gather up all the values in the reply/replies
'''
status = unpack_from('<B', data, 48)[0]
extendedStatus = unpack_from('<B', data, 49)[0]
elements = int(elements)
if status == 0 or status == 6:
# parse the tag
tagName, basetag, index = _parseTagName(tag, 0)
datatype = self.KnownTags[basetag][0]
CIPFormat = self.CIPTypes[datatype][2]
vals = []
dataSize = self.CIPTypes[datatype][0]
numbytes = len(data)-dataSize
counter = 0
self.Offset = 0
for i in range(elements):
index = 52+(counter*dataSize)
if datatype == 160:
tmp = unpack_from('<h', data, 52)[0]
if tmp == self.StructIdentifier:
                        # strings need to be handled a little differently
index = 54+(counter*dataSize)
NameLength = unpack_from('<L', data, index)[0]
s = data[index+4:index+4+NameLength]
vals.append(str(s.decode('utf-8')))
else:
d = data[index:index+len(data)]
vals.append(d)
elif datatype == 218:
index = 52+(counter*dataSize)
NameLength = unpack_from('<B', data, index)[0]
s = data[index+1:index+1+NameLength]
vals.append(str(s.decode('utf-8')))
else:
returnvalue = unpack_from(CIPFormat, data, index)[0]
vals.append(returnvalue)
self.Offset += dataSize
counter += 1
# re-read because the data is in more than one packet
if index == numbytes and status == 6:
index = 0
counter = 0
tagIOI = self._buildTagIOI(tag, isBoolArray=False)
readIOI = self._addPartialReadIOI(tagIOI, elements)
eipHeader = self._buildEIPHeader(readIOI)
self.Socket.send(eipHeader)
data = self.recv_data()
status = unpack_from('<B', data, 48)[0]
numbytes = len(data)-dataSize
return vals
        else:  # the read was not successful
if status in cipErrorCodes.keys():
err = cipErrorCodes[status]
else:
                err = 'Unknown error {}'.format(status)
return 'Failed to read tag: {} - {}'.format(tag, err)
def recv_data(self):
'''
When receiving data from the socket, it is possible to receive
incomplete data. The initial packet that comes in contains
the length of the payload. We can use that to keep calling
        recv() until the entire payload is received. This only happens
        when using LargeForwardOpen.
'''
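        # (added note) Bytes 2-3 of the 24-byte EIP encapsulation header carry the
        # payload length, so we keep reading until header + payload bytes have arrived.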
data = b''
part = self.Socket.recv(4096)
payload_len = unpack_from('<H', part, 2)[0]
data += part
while len(data)-24 < payload_len:
part = self.Socket.recv(4096)
data += part
return data
def _initial_read(self, tag, baseTag, dt):
'''
        Store each unique tag read in a dict so that we can retrieve the
        data type or data length (for STRING) later.
        '''
        # if the tag already exists, return True
if baseTag in self.KnownTags:
return True
if dt:
self.KnownTags[baseTag] = (dt, 0)
return True
tagData = self._buildTagIOI(baseTag, isBoolArray=False)
readRequest = self._addPartialReadIOI(tagData, 1)
eipHeader = self._buildEIPHeader(readRequest)
# send our tag read request
status, retData = self._getBytes(eipHeader)
# make sure it was successful
if status == 0 or status == 6:
dataType = unpack_from('<B', retData, 50)[0]
dataLen = unpack_from('<H', retData, 2)[0] # this is really just used for STRING
self.KnownTags[baseTag] = (dataType, dataLen)
return True
else:
if status in cipErrorCodes.keys():
err = cipErrorCodes[status]
else:
err = 'Unknown error {}'.format(status)
raise ValueError('Failed to read tag: {}'.format(err))
def _wordsToBits(self, tag, value, count=0):
'''
Convert words to a list of true/false
'''
tagName, basetag, index = _parseTagName(tag, 0)
datatype = self.KnownTags[basetag][0]
bitCount = self.CIPTypes[datatype][0] * 8
if datatype == 211:
bitPos = index%32
else:
split_tag = tag.split('.')
bitPos = split_tag[len(split_tag)-1]
bitPos = int(bitPos)
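        # Example (illustrative): for a BOOL array read of Tag[35], index 35 maps to
        # bit 3 within its 32-bit word; each returned word is expanded to 32 booleans
        # below and the slice [bitPos:bitPos+count] picks out the requested bits.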
ret = []
for v in value:
for i in range(0,bitCount):
ret.append(BitValue(v, i))
return ret[bitPos:bitPos+count]
def _multiParser(self, tags, data):
'''
Takes multi read reply data and returns an array of the values
'''
# remove the beginning of the packet because we just don't care about it
stripped = data[50:]
tagCount = unpack_from('<H', stripped, 0)[0]
# get the offset values for each of the tags in the packet
reply = []
for i, tag in enumerate(tags):
if isinstance(tag, (list, tuple)):
tag = tag[0]
loc = 2+(i*2)
offset = unpack_from('<H', stripped, loc)[0]
replyStatus = unpack_from('<b', stripped, offset+2)[0]
replyExtended = unpack_from('<b', stripped, offset+3)[0]
# successful reply, add the value to our list
if replyStatus == 0 and replyExtended == 0:
dataTypeValue = unpack_from('<B', stripped, offset+4)[0]
# if bit of word was requested
if BitofWord(tag):
dataTypeFormat = self.CIPTypes[dataTypeValue][2]
val = unpack_from(dataTypeFormat, stripped, offset+6)[0]
bitState = _getBitOfWord(tag, val)
reply.append(bitState)
elif dataTypeValue == 211:
dataTypeFormat = self.CIPTypes[dataTypeValue][2]
val = unpack_from(dataTypeFormat, stripped, offset+6)[0]
bitState = _getBitOfWord(tag, val)
reply.append(bitState)
elif dataTypeValue == 160:
strlen = unpack_from('<B', stripped, offset+8)[0]
s = stripped[offset+12:offset+12+strlen]
reply.append(str(s.decode('utf-8')))
else:
dataTypeFormat = self.CIPTypes[dataTypeValue][2]
reply.append(unpack_from(dataTypeFormat, stripped, offset+6)[0])
else:
reply.append("Error")
return reply
def _buildListIdentity(self):
'''
        Build the List Identity request for discovering EtherNet/IP
        devices on the network
'''
ListService = 0x63
ListLength = 0x00
ListSessionHandle = 0x00
ListStatus = 0x00
ListResponse = 0xFA
ListContext1 = 0x6948
ListContext2 = 0x6f4d
ListContext3 = 0x006d
ListOptions = 0x00
return pack("<HHIIHHHHI",
ListService,
ListLength,
ListSessionHandle,
ListStatus,
ListResponse,
ListContext1,
ListContext2,
ListContext3,
ListOptions)
def _extractTagPacket(self, data, programName):
# the first tag in a packet starts at byte 50
packetStart = 50
while packetStart < len(data):
# get the length of the tag name
tagLen = unpack_from('<H', data, packetStart+4)[0]
# get a single tag from the packet
packet = data[packetStart:packetStart+tagLen+20]
# extract the offset
self.Offset = unpack_from('<H', packet, 0)[0]
# add the tag to our tag list
tag = parseLgxTag(packet, programName)
# filter out garbage
if '__DEFVAL_' in tag.TagName:
pass
elif 'Routine:' in tag.TagName:
pass
elif 'Map:' in tag.TagName:
pass
elif 'Task:' in tag.TagName:
pass
else:
self.TagList.append(tag)
if not programName:
if 'Program:' in tag.TagName:
programNames.append(tag.TagName)
            # increment to the next tag in the packet
packetStart = packetStart+tagLen+20
def _makeString(self, string):
work = []
if self.Micro800:
temp = pack('<B', len(string)).decode('utf-8')
else:
temp = pack('<I', len(string)).decode('utf-8')
for char in temp:
work.append(ord(char))
for char in string:
work.append(ord(char))
if not self.Micro800:
for x in range(len(string), 84):
work.append(0x00)
return work
def _getBitOfWord(tag, value):
'''
Takes a tag name, gets the bit from the end of
    it, then returns that bit's value
'''
split_tag = tag.split('.')
stripped = split_tag[len(split_tag)-1]
if stripped.endswith(']'):
val = stripped[stripped.find("[")+1:stripped.find("]")]
val = int(val)
bitPos = val & 0x1f
returnValue = BitValue(value, bitPos)
    else:
        # default to None so we never return an unbound name on a parse failure
        returnValue = None
        try:
            bitPos = int(stripped)
            if bitPos <= 31:
                returnValue = BitValue(value, bitPos)
        except Exception:
            pass
return returnValue
def _getWordCount(start, length, bits):
'''
Get the number of words that the requested
bits would occupy. We have to take into account
how many bits are in a word and the fact that the
    number of requested bits can span multiple words.
'''
newStart = start % bits
newEnd = newStart + length
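    # Worked example (illustrative): start=35, length=4, bits=32 gives newStart=3 and
    # newEnd=7, so one word covers the request; start=30, length=4 crosses a word
    # boundary and returns 2.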
    # integer division so a whole number of words is returned
    totalWords = (newEnd-1) // bits
    return totalWords + 1
def _parseTagName(tag, offset):
'''
    Parse the tag name to get the base tag and array index.
    The offset lets the caller increment the array pointer if need be.
'''
bt = tag
ind = 0
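    # Examples (illustrative): 'Counts[5]' returns ('Counts[5]', 'Counts', 5),
    # 'Grid[1,2]' returns ('Grid[1,2]', 'Grid', [1, 2]), and a plain tag name
    # returns an index of 0.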
try:
if tag.endswith(']'):
pos = (len(tag)-tag.rindex("["))# find position of [
bt = tag[:-pos] # remove [x]: result=SuperDuper
temp = tag[-pos:] # remove tag: result=[x]
ind = temp[1:-1] # strip the []: result=x
            s = ind.split(',')  # split so we can check for a multi-dimension array
if len(s) == 1:
ind = int(ind)
newTagName = bt+'['+str(ind+offset)+']'
else:
# if we have a multi dim array, return the index
ind = []
for i in range(len(s)):
s[i] = int(s[i])
ind.append(s[i])
else:
pass
return tag, bt, ind
except:
return tag, bt, 0
def BitofWord(tag):
'''
Test if the user is trying to write to a bit of a word
ex. Tag.1 returns True (Tag = DINT)
'''
s = tag.split('.')
if s[len(s)-1].isdigit():
return True
else:
return False
def BitValue(value, bitno):
'''
Returns the specific bit of a words value
'''
mask = 1 << bitno
if (value & mask):
return True
else:
return False
def _parseIdentityResponse(data):
# we're going to take the packet and parse all
# the data that is in it.
resp = LGXDevice()
resp.Length = unpack_from('<H', data, 28)[0]
resp.EncapsulationVersion = unpack_from('<H', data, 30)[0]
longIP = unpack_from('<I', data, 36)[0]
resp.IPAddress = socket.inet_ntoa(pack('<L', longIP))
resp.VendorID = unpack_from('<H', data, 48)[0]
resp.Vendor = GetVendor(resp.VendorID)
resp.DeviceID = unpack_from('<H', data, 50)[0]
resp.Device = GetDevice(resp.DeviceID)
resp.ProductCode = unpack_from('<H', data, 52)[0]
major = unpack_from('<B', data, 54)[0]
minor = unpack_from('<B', data, 55)[0]
resp.Revision = str(major) + '.' + str(minor)
resp.Status = unpack_from('<H', data, 56)[0]
resp.SerialNumber = hex(unpack_from('<I', data, 58)[0])
resp.ProductNameLength = unpack_from('<B', data, 62)[0]
resp.ProductName = str(data[63:63+resp.ProductNameLength].decode('utf-8'))
state = data[-1:]
resp.State = unpack_from('<B', state, 0)[0]
return resp
def parseLgxTag(packet, programName):
t = LgxTag()
length = unpack_from('<H', packet, 4)[0]
name = packet[6:length+6].decode('utf-8')
if programName:
t.TagName = str(programName + '.' + name)
else:
t.TagName = str(name)
t.InstanceID = unpack_from('<H', packet, 0)[0]
val = unpack_from('<H', packet, length+6)[0]
t.SymbolType = val & 0xff
t.DataTypeValue = val & 0xfff
t.Array = (val & 0x6000) >> 13
t.Struct = (val & 0x8000) >> 15
if t.Array:
t.Size = unpack_from('<H', packet, length+8)[0]
else:
t.Size = 0
return t
class LgxTag:
def __init__(self):
self.TagName = ''
self.InstanceID = 0x00
self.SymbolType = 0x00
self.DataTypeValue = 0x00
self.DataType = ''
self.Array = 0x00
self.Struct = 0x00
self.Size = 0x00
cipErrorCodes = {0x00: 'Success',
0x01: 'Connection failure',
0x02: 'Resource unavailable',
0x03: 'Invalid parameter value',
0x04: 'Path segment error',
0x05: 'Path destination unknown',
0x06: 'Partial transfer',
0x07: 'Connection lost',
0x08: 'Service not supported',
0x09: 'Invalid Attribute',
0x0A: 'Attribute list error',
0x0B: 'Already in requested mode/state',
0x0C: 'Object state conflict',
0x0D: 'Object already exists',
0x0E: 'Attribute not settable',
0x0F: 'Privilege violation',
0x10: 'Device state conflict',
0x11: 'Reply data too large',
                 0x12: 'Fragmentation of a primitive value',
0x13: 'Not enough data',
0x14: 'Attribute not supported',
0x15: 'Too much data',
0x16: 'Object does not exist',
0x17: 'Service fragmentation sequence not in progress',
0x18: 'No stored attribute data',
0x19: 'Store operation failure',
0x1A: 'Routing failure, request packet too large',
0x1B: 'Routing failure, response packet too large',
0x1C: 'Missing attribute list entry data',
0x1D: 'Invalid attribute value list',
0x1E: 'Embedded service error',
0x1F: 'Vendor specific',
0x20: 'Invalid Parameter',
0x21: 'Write once value or medium already written',
0x22: 'Invalid reply received',
0x23: 'Buffer overflow',
0x24: 'Invalid message format',
0x25: 'Key failure in path',
0x26: 'Path size invalid',
0x27: 'Unexpected attribute in list',
0x28: 'Invalid member ID',
0x29: 'Member not settable',
0x2A: 'Group 2 only server general failure',
0x2B: 'Unknown Modbus error',
0x2C: 'Attribute not gettable'}
# Context values passed to the PLC when reading/writing
context_dict = {0: 0x6572276557,
1: 0x6f6e,
2: 0x676e61727473,
3: 0x737265,
4: 0x6f74,
5: 0x65766f6c,
6: 0x756f59,
7: 0x776f6e6b,
8: 0x656874,
9: 0x73656c7572,
10: 0x646e61,
11: 0x6f73,
12: 0x6f64,
13: 0x49,
14: 0x41,
15: 0x6c6c7566,
16: 0x74696d6d6f63,
17: 0x7327746e656d,
18: 0x74616877,
19: 0x6d2749,
20: 0x6b6e696874,
21: 0x676e69,
22: 0x666f,
23: 0x756f59,
24: 0x746e646c756f77,
25: 0x746567,
26: 0x73696874,
27: 0x6d6f7266,
28: 0x796e61,
29: 0x726568746f,
30: 0x797567,
31: 0x49,
32: 0x7473756a,
33: 0x616e6e6177,
34: 0x6c6c6574,
35: 0x756f79,
36: 0x776f68,
37: 0x6d2749,
38: 0x676e696c656566,
39: 0x6174746f47,
40: 0x656b616d,
41: 0x756f79,
42: 0x7265646e75,
43: 0x646e617473,
44: 0x726576654e,
45: 0x616e6e6f67,
46: 0x65766967,
47: 0x756f79,
48: 0x7075,
49: 0x726576654e,
50: 0x616e6e6f67,
51: 0x74656c,
52: 0x756f79,
53: 0x6e776f64,
54: 0x726576654e,
55: 0x616e6e6f67,
56: 0x6e7572,
57: 0x646e756f7261,
58: 0x646e61,
59: 0x747265736564,
60: 0x756f79,
61: 0x726576654e,
62: 0x616e6e6f67,
63: 0x656b616d,
64: 0x756f79,
65: 0x797263,
66: 0x726576654e,
67: 0x616e6e6f67,
68: 0x796173,
69: 0x657962646f6f67,
70: 0x726576654e,
71: 0x616e6e6f67,
72: 0x6c6c6574,
73: 0x61,
74: 0x65696c,
75: 0x646e61,
76: 0x74727568,
77: 0x756f79,
78: 0x6576276557,
79: 0x6e776f6e6b,
80: 0x68636165,
81: 0x726568746f,
82: 0x726f66,
83: 0x6f73,
84: 0x676e6f6c,
85: 0x72756f59,
86: 0x73277472616568,
87: 0x6e656562,
88: 0x676e69686361,
89: 0x747562,
90: 0x657227756f59,
91: 0x6f6f74,
92: 0x796873,
93: 0x6f74,
94: 0x796173,
95: 0x7469,
96: 0x656469736e49,
97: 0x6577,
98: 0x68746f62,
99: 0x776f6e6b,
100: 0x732774616877,
101: 0x6e656562,
102: 0x676e696f67,
103: 0x6e6f,
104: 0x6557,
105: 0x776f6e6b,
106: 0x656874,
107: 0x656d6167,
108: 0x646e61,
109: 0x6572276577,
110: 0x616e6e6f67,
111: 0x79616c70,
112: 0x7469,
113: 0x646e41,
114: 0x6669,
115: 0x756f79,
116: 0x6b7361,
117: 0x656d,
118: 0x776f68,
119: 0x6d2749,
120: 0x676e696c656566,
121: 0x74276e6f44,
122: 0x6c6c6574,
123: 0x656d,
124: 0x657227756f79,
125: 0x6f6f74,
126: 0x646e696c62,
127: 0x6f74,
128: 0x656573,
129: 0x726576654e,
130: 0x616e6e6f67,
131: 0x65766967,
132: 0x756f79,
133: 0x7075,
134: 0x726576654e,
135: 0x616e6e6f67,
136: 0x74656c,
137: 0x756f79,
138: 0x6e776f64,
139: 0x726576654e,
140: 0x6e7572,
141: 0x646e756f7261,
142: 0x646e61,
143: 0x747265736564,
144: 0x756f79,
145: 0x726576654e,
146: 0x616e6e6f67,
147: 0x656b616d,
148: 0x756f79,
149: 0x797263,
150: 0x726576654e,
151: 0x616e6e6f67,
152: 0x796173,
153: 0x657962646f6f67,
154: 0x726576654e,
155: 0xa680e2616e6e6f67}
| 35.521806
| 115
| 0.503004
|
26aa17d4ad757d16d85af83a63d06e383faf6c20
| 739
|
py
|
Python
|
pyefun/systemProcessingBase.py
|
liguoqing-byte/pyefun
|
773b89944aae5beb060d75f21cf1553bb359660a
|
[
"Apache-2.0"
] | null | null | null |
pyefun/systemProcessingBase.py
|
liguoqing-byte/pyefun
|
773b89944aae5beb060d75f21cf1553bb359660a
|
[
"Apache-2.0"
] | null | null | null |
pyefun/systemProcessingBase.py
|
liguoqing-byte/pyefun
|
773b89944aae5beb060d75f21cf1553bb359660a
|
[
"Apache-2.0"
] | null | null | null |
"""
.. Hint::
    System processing
.. literalinclude:: ../../../pyefun/systemProcessingBase_test.py
:language: python
    :caption: Code example
:linenos:
"""
import time
import os
import platform
def 系统_是否为window系统():
return platform.system().lower() == 'windows'
def 系统_是否为linux系统():
return platform.system().lower() == 'linux'
def 系统_是否为mac系统():
return platform.system().lower() == 'darwin'
# 运行 (run)
# 打开内存文件 (open memory file)
# 取剪辑板文本 (get clipboard text)
# 置剪辑板文本 (set clipboard text)
# 剪辑板中可有文本 (clipboard has text)
# 清除剪辑板 (clear clipboard)
# 取屏幕宽度 (get screen width)
# 取屏幕高度 (get screen height)
# 取鼠标水平位置 (get mouse horizontal position)
# 取鼠标垂直位置 (get mouse vertical position)
# 取颜色数 (get color depth)
# 输入框 (input box)
# 信息框 (message box)
# 取文本注册项 (get text registry value)
# 取数值注册项 (get numeric registry value)
# 取字节集注册 (get byte-set registry value)
# 写注册项 (write registry value)
# 删除注册项 (delete registry value)
# 注册项是否存在 (registry key exists)
# 取默认底色 (get default background color)
# 快照 (screenshot)
# 读配置项 (read config item)
# 写配置项 (write config item)
# 取配置节名 (get config section names)
# 取操作系统类别 (get operating system type)
# 多文件对话框 (multi-file dialog)
def 延时(秒: int):
time.sleep(秒)
def 运行(cmd):
p = os.popen(cmd)
x = p.read()
p.close()
return x
| 10.408451
| 64
| 0.608931
|
a384df9531cad8ac8918981c727f5a2182ad016c
| 1,097
|
py
|
Python
|
invirtualenv_plugins/rpm_scripts/pre_uninstall.py
|
BenLloydPearson/invirtualenv
|
5103eece3a998499fa260413ef7e57baa57555c4
|
[
"BSD-3-Clause"
] | null | null | null |
invirtualenv_plugins/rpm_scripts/pre_uninstall.py
|
BenLloydPearson/invirtualenv
|
5103eece3a998499fa260413ef7e57baa57555c4
|
[
"BSD-3-Clause"
] | null | null | null |
invirtualenv_plugins/rpm_scripts/pre_uninstall.py
|
BenLloydPearson/invirtualenv
|
5103eece3a998499fa260413ef7e57baa57555c4
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
from __future__ import print_function
import logging
import os
import shutil
import sys
from invirtualenv.deploy import unlink_deployed_bin_files
from invirtualenv.config import get_configuration_dict
if __name__ == "__main__":
# /usr/share/<packagename_version>/package_scripts
this_script_dir = os.path.dirname(os.path.realpath(__file__))
path_bits = this_script_dir.split(os.path.sep)
# Remove leading package_scripts from the path
path_bits.remove('package_scripts')
# /usr/share/<packagename_version>/
data_dir = os.path.sep.join(path_bits)
deploy_conf = os.path.join(data_dir, 'deploy.conf')
if not os.path.exists(deploy_conf):
print("No 'deploy.conf' found. Doing nothing", file=sys.stderr)
sys.exit(0)
config = get_configuration_dict(deploy_conf)
venv_dir = config['global'].get('virtualenv_deploy_dir', None)
if venv_dir and os.path.exists(venv_dir):
unlink_deployed_bin_files(venv_dir)
logging.debug('Removing virtualenv directory %r' % venv_dir)
shutil.rmtree(venv_dir)
| 35.387097
| 72
| 0.740201
|
6f38befcc863f9f0504dac65b12395f691150084
| 25,154
|
py
|
Python
|
app/navigation.py
|
cds-snc/notification-admin
|
d4056798bf889ad29893667bbb67ead2f8e466e4
|
[
"MIT"
] | 16
|
2019-11-05T21:35:49.000Z
|
2022-01-12T15:00:32.000Z
|
app/navigation.py
|
cds-snc/notification-admin
|
d4056798bf889ad29893667bbb67ead2f8e466e4
|
[
"MIT"
] | 509
|
2019-07-11T22:03:19.000Z
|
2022-03-30T15:19:26.000Z
|
app/navigation.py
|
cds-snc/notification-admin
|
d4056798bf889ad29893667bbb67ead2f8e466e4
|
[
"MIT"
] | 8
|
2020-02-21T20:19:29.000Z
|
2022-03-31T14:17:02.000Z
|
from itertools import chain
from flask import request
class Navigation:
mapping: dict = {}
exclude: set = set()
selected_attribute = "class=selected"
def __init__(self):
self.mapping = {
navigation: {"main.{}".format(endpoint) for endpoint in endpoints} for navigation, endpoints in self.mapping.items()
}
@property
def endpoints_with_navigation(self):
return tuple(chain.from_iterable((endpoints for navigation_item, endpoints in self.mapping.items())))
@property
def endpoints_without_navigation(self):
return tuple("main.{}".format(endpoint) for endpoint in self.exclude) + (
"static",
"status.show_status",
)
def is_selected(self, navigation_item):
if request.endpoint in self.mapping[navigation_item]:
return self.selected_attribute
return ""
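# Usage sketch (added for clarity, not from the original source): each subclass below
# maps a navigation item name to the set of endpoint names that should mark it as
# selected, e.g. mapping = {"dashboard": {"service_dashboard"}}; templates then call
# navigation.is_selected("dashboard") to get the selected class/attribute string.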
class AdminNavigation(Navigation):
selected_attribute = "active"
mapping = {
"choose_account": {
"choose_account",
},
"live_services": {
"live_services",
},
"trial_services": {
"trial_services",
},
"organisations": {
"organisations",
},
"live_api_keys": {
"live_api_keys",
},
"email_branding": {
"email_branding",
},
"find_services_by_name": {
"find_services_by_name",
},
"find_users_by_email": {
"find_users_by_email",
},
"platform_admin_list_complaints": {
"platform_admin_list_complaints",
},
"platform_admin_reports": {
"platform_admin_reports",
},
"inbound_sms_admin": {
"inbound_sms_admin",
},
"view_providers": {
"view_providers",
},
"clear_cache": {
"clear_cache",
},
}
class HeaderNavigation(Navigation):
selected_attribute = "active"
mapping = {
"dashboard": {
"monthly",
"service_dashboard",
"template_usage",
"view_job",
"view_jobs",
"view_notification",
"view_notifications",
},
"support": {
"set_lang",
"contact",
},
"features": {
"features",
},
"home": {
"index",
},
"why-notify": {
"why-notify",
},
"contact": {
"contact",
},
"pricing": {
"pricing",
},
"documentation": {
"documentation",
},
"design_content": {
"design_content",
},
"user-profile": {
"user_profile",
"user_profile_email",
"user_profile_email_authenticate",
"user_profile_email_confirm",
"user_profile_mobile_number",
"user_profile_mobile_number_authenticate",
"user_profile_mobile_number_confirm",
"user_profile_name",
"user_profile_password",
"user_profile_disable_platform_admin_view",
},
"sign-in": {
"sign_in",
"two_factor_sms_sent",
"two_factor_email_sent",
"verify",
"verify_email",
"verify_mobile",
},
"choose_account": {
"choose_account",
},
"team-members": {
"confirm_edit_user_email",
"confirm_edit_user_mobile_number",
"edit_user_email",
"edit_user_mobile_number",
"edit_user_permissions",
"invite_user",
"manage_users",
"remove_user_from_service",
},
"templates": {
"action_blocked",
"add_service_template",
"check_messages",
"check_notification",
"choose_template",
"choose_template_to_copy",
"confirm_redact_template",
"copy_template",
"delete_service_template",
"edit_service_template",
"edit_template_postage",
"manage_template_folder",
"s3_send",
"send_messages",
"send_one_off",
"send_one_off_step",
"send_test",
"send_test_preview",
"send_test_step",
"set_sender",
"set_template_sender",
"view_template",
"view_template_version",
"view_template_versions",
},
"api-integration": {
"api_callbacks",
"api_documentation",
"api_integration",
"api_keys",
"create_api_key",
"delivery_status_callback",
"received_text_messages_callback",
"revoke_api_key",
"safelist",
},
"settings": {
"branding_request",
"link_service_to_organisation",
"request_letter_branding",
"request_to_go_live",
"service_add_email_reply_to",
"service_add_letter_contact",
"service_add_sms_sender",
"service_agreement",
"service_accept_agreement",
"service_confirm_agreement",
"service_confirm_delete_email_reply_to",
"service_confirm_delete_letter_contact",
"service_confirm_delete_sms_sender",
"service_edit_email_reply_to",
"service_edit_letter_contact",
"service_edit_sms_sender",
"service_email_reply_to",
"service_letter_contact_details",
"service_make_blank_default_letter_contact",
"service_name_change",
"service_name_change_confirm",
"service_email_from_change",
"service_email_from_change_confirm",
"service_preview_email_branding",
"service_preview_letter_branding",
"service_set_auth_type",
"service_set_channel",
"service_set_contact_link",
"service_set_email_branding",
"service_set_inbound_number",
"service_set_inbound_sms",
"service_set_international_sms",
"service_set_letter_contact_block",
"service_set_letters",
"service_set_reply_to_email",
"service_set_sms_prefix",
"service_verify_reply_to_address",
"service_verify_reply_to_address_updates",
"service_settings",
"service_sms_senders",
"set_message_limit",
"set_free_sms_allowance",
"service_set_letter_branding",
"submit_request_to_go_live",
},
"sent-messages": {
"view_notifications",
"view_notification",
},
"bulk-sends": {
"view_jobs",
"view_job",
},
}
exclude = {
"accept_invite",
"accept_org_invite",
"add_data_retention",
"add_inbound_sms_admin",
"add_organisation",
"add_service",
"archive_service",
"callbacks",
"cancel_invited_org_user",
"cancel_invited_user",
"cancel_job",
"cancel_letter",
"cancel_letter_job",
"check_and_resend_text_code",
"check_and_resend_verification_code",
"check_messages_preview",
"check_notification_preview",
"choose_account",
"choose_service",
"confirm_edit_organisation_name",
"data_retention",
"delete_template_folder",
"design_content",
"download_notifications_csv",
"edit_data_retention",
"edit_organisation_agreement",
"edit_organisation_crown_status",
"edit_organisation_domains",
"edit_organisation_email_branding",
"edit_organisation_letter_branding",
"edit_organisation_go_live_notes",
"edit_organisation_name",
"edit_organisation_type",
"edit_provider",
"edit_user_org_permissions",
"email_not_received",
"email_template",
"error",
"forgot_password",
"get_example_csv",
"get_notifications_as_json",
"go_to_dashboard_after_tour",
"inbound_sms_admin",
"invite_org_user",
"letter_branding_preview_image",
"letter_template",
"manage_org_users",
"new_password",
"redirect_contact",
"redirect_service_dashboard",
"redirect_terms",
"redirect_roadmap",
"redirect_email",
"redirect_sms",
"redirect_letters",
"redirect_templates",
"redirect_security",
"redirect_messages_status",
"organisation_dashboard",
"organisation_trial_mode_services",
"organisation_settings",
"organisation_preview_email_branding",
"organisation_preview_letter_branding",
"organisations",
"privacy",
"public_agreement",
"public_download_agreement",
"redact_template",
"register",
"register_from_invite",
"register_from_org_invite",
"registration_continue",
"remove_user_from_organisation",
"remove_user_from_service",
"request_letter_branding",
"request_to_go_live",
"terms_of_use",
"use_case",
"resend_email_link",
"resend_email_verification",
"resume_service",
"robots",
"security_txt",
"send_notification",
"service_dashboard",
"service_dashboard_updates",
"service_delete_email_reply_to",
"service_delete_letter_contact",
"service_delete_sms_sender",
"service_download_agreement",
"service_letter_validation_preview",
"service_switch_upload_document",
"service_switch_count_as_live",
"service_switch_live",
"service_set_permission",
"services_or_dashboard",
"show_accounts_or_dashboard",
"sign_out",
"start_job",
"start_tour",
"styleguide",
"temp_service_history",
"template_history",
"user_profile_authenticate_security_keys",
"user_profile_complete_security_keys",
"user_profile_validate_security_keys",
"user_profile_add_security_keys",
"user_profile_security_keys",
"user_profile_security_keys_confirm_delete",
"uploads",
"usage",
"view_job_csv",
"view_job_updates",
"view_letter_notification_as_preview",
"view_letter_template_preview",
"view_notification_updates",
"view_notifications_csv",
"view_template_version_preview",
"safelist",
"get_template_data",
"block_user",
"unblock_user",
"service_sending_domain",
"welcome",
}
class MainNavigation(Navigation):
mapping = {
"dashboard": {
"monthly",
"service_dashboard",
"template_usage",
"view_job",
"view_jobs",
"view_notification",
"view_notifications",
},
"templates": {
"action_blocked",
"add_service_template",
"check_messages",
"check_notification",
"choose_template",
"choose_template_to_copy",
"confirm_redact_template",
"copy_template",
"delete_service_template",
"edit_service_template",
"edit_template_postage",
"manage_template_folder",
"s3_send",
"send_messages",
"send_one_off",
"send_one_off_step",
"send_test",
"send_test_preview",
"send_test_step",
"set_sender",
"set_template_sender",
"view_template",
"view_template_version",
"view_template_versions",
"welcome",
},
"uploads": {
"uploads",
},
"team-members": {
"confirm_edit_user_email",
"confirm_edit_user_mobile_number",
"edit_user_email",
"edit_user_mobile_number",
"edit_user_permissions",
"invite_user",
"manage_users",
"remove_user_from_service",
},
"usage": {
"usage",
},
"settings": {
"branding_request",
"link_service_to_organisation",
"request_letter_branding",
"request_to_go_live",
"terms_of_use",
"use_case",
"service_add_email_reply_to",
"service_add_letter_contact",
"service_add_sms_sender",
"service_agreement",
"service_accept_agreement",
"service_confirm_agreement",
"service_confirm_delete_email_reply_to",
"service_confirm_delete_letter_contact",
"service_confirm_delete_sms_sender",
"service_edit_email_reply_to",
"service_edit_letter_contact",
"service_edit_sms_sender",
"service_email_reply_to",
"service_letter_contact_details",
"service_make_blank_default_letter_contact",
"service_name_change",
"service_name_change_confirm",
"service_email_from_change",
"service_email_from_change_confirm",
"service_preview_email_branding",
"service_preview_letter_branding",
"service_set_auth_type",
"service_set_channel",
"service_set_contact_link",
"service_set_email_branding",
"service_set_inbound_number",
"service_set_inbound_sms",
"service_set_international_sms",
"service_set_letter_contact_block",
"service_set_letters",
"service_set_reply_to_email",
"service_set_sms_prefix",
"service_verify_reply_to_address",
"service_verify_reply_to_address_updates",
"service_settings",
"service_sms_senders",
"set_message_limit",
"set_free_sms_allowance",
"service_set_letter_branding",
"submit_request_to_go_live",
},
"api-integration": {
"api_callbacks",
"api_documentation",
"api_integration",
"api_keys",
"create_api_key",
"delivery_status_callback",
"received_text_messages_callback",
"revoke_api_key",
"safelist",
},
"choose_account": {
"choose_account",
},
"live_services": {
"live_services",
},
"user_profile": {
"user_profile",
},
"sign_out": {"sign_out"},
}
exclude = {
"accept_invite",
"accept_org_invite",
"add_data_retention",
"add_inbound_sms_admin",
"add_organisation",
"add_service",
"archive_service",
"archive_user",
"clear_cache",
"create_email_branding",
"create_letter_branding",
"email_branding",
"find_services_by_name",
"find_users_by_email",
"letter_branding",
"live_api_keys",
"live_services",
"live_services_csv",
"notifications_sent_by_service",
"performance_platform_xlsx",
"send_method_stats_by_service",
"trial_report_csv",
"usage_for_all_services",
"platform_admin",
"platform_admin_letter_validation_preview",
"platform_admin_list_complaints",
"platform_admin_reports",
"platform_admin_returned_letters",
"suspend_service",
"trial_services",
"update_email_branding",
"update_letter_branding",
"user_information",
"view_provider",
"view_providers",
"welcome",
}
class OrgNavigation(Navigation):
mapping = {
"dashboard": {
"organisation_dashboard",
},
"settings": {
"confirm_edit_organisation_name",
"edit_organisation_agreement",
"edit_organisation_crown_status",
"edit_organisation_domains",
"edit_organisation_email_branding",
"edit_organisation_letter_branding",
"edit_organisation_domains",
"edit_organisation_go_live_notes",
"edit_organisation_name",
"edit_organisation_type",
"organisation_preview_email_branding",
"organisation_preview_letter_branding",
"organisation_settings",
},
"team-members": {
"edit_user_org_permissions",
"invite_org_user",
"manage_org_users",
"remove_user_from_organisation",
},
"trial-services": {
"organisation_trial_mode_services",
},
}
exclude = {
"accept_invite",
"accept_org_invite",
"a11y",
"action_blocked",
"add_data_retention",
"add_inbound_sms_admin",
"add_organisation",
"add_service",
"add_service_template",
"api_callbacks",
"api_documentation",
"api_integration",
"api_keys",
"archive_service",
"archive_user",
"branding_request",
"callbacks",
"cancel_invited_org_user",
"cancel_invited_user",
"cancel_job",
"cancel_letter",
"cancel_letter_job",
"check_and_resend_text_code",
"check_and_resend_verification_code",
"check_messages",
"check_messages_preview",
"check_notification",
"check_notification_preview",
"choose_account",
"choose_service",
"choose_template",
"choose_template_to_copy",
"clear_cache",
"confirm_edit_user_email",
"confirm_edit_user_mobile_number",
"confirm_redact_template",
"copy_template",
"create_api_key",
"create_email_branding",
"create_letter_branding",
"data_retention",
"delete_service_template",
"delete_template_folder",
"delivery_status_callback",
"design_content",
"documentation",
"download_notifications_csv",
"edit_data_retention",
"edit_provider",
"edit_service_template",
"edit_template_postage",
"edit_user_email",
"edit_user_mobile_number",
"edit_user_permissions",
"email_branding",
"email_not_received",
"email_template",
"error",
"features",
"email",
"letters",
"templates",
"sms",
"contact",
"find_services_by_name",
"find_users_by_email",
"forgot_password",
"get_example_csv",
"get_notifications_as_json",
"go_to_dashboard_after_tour",
"inbound_sms_admin",
"index",
"invite_user",
"letter_branding",
"letter_branding_preview_image",
"letter_template",
"link_service_to_organisation",
"live_api_keys",
"live_services",
"live_services_csv",
"manage_template_folder",
"manage_users",
"messages_status",
"new_password",
"notifications_sent_by_service",
"redirect_contact",
"redirect_roadmap",
"redirect_service_dashboard",
"redirect_terms",
"redirect_email",
"redirect_sms",
"redirect_letters",
"redirect_templates",
"redirect_security",
"redirect_messages_status",
"organisations",
"performance_platform_xlsx",
"send_method_stats_by_service",
"trial_report_csv",
"platform_admin",
"platform_admin_letter_validation_preview",
"platform_admin_list_complaints",
"platform_admin_reports",
"platform_admin_returned_letters",
"pricing",
"privacy",
"public_agreement",
"public_download_agreement",
"received_text_messages_callback",
"redact_template",
"register",
"register_from_invite",
"register_from_org_invite",
"registration_continue",
"remove_user_from_service",
"request_letter_branding",
"request_to_go_live",
"terms_of_use",
"use_case",
"resend_email_link",
"resend_email_verification",
"resume_service",
"revoke_api_key",
"roadmap",
"robots",
"s3_send",
"security",
"security_txt",
"send_messages",
"send_notification",
"send_one_off",
"send_one_off_step",
"send_test",
"send_test_preview",
"send_test_step",
"service_add_email_reply_to",
"service_add_letter_contact",
"service_add_sms_sender",
"service_agreement",
"service_accept_agreement",
"service_confirm_agreement",
"service_confirm_delete_email_reply_to",
"service_confirm_delete_letter_contact",
"service_confirm_delete_sms_sender",
"service_dashboard",
"service_dashboard_updates",
"service_delete_email_reply_to",
"service_delete_letter_contact",
"service_delete_sms_sender",
"service_download_agreement",
"service_edit_email_reply_to",
"service_edit_letter_contact",
"service_edit_sms_sender",
"service_email_reply_to",
"service_letter_contact_details",
"service_letter_validation_preview",
"service_make_blank_default_letter_contact",
"service_name_change",
"service_name_change_confirm",
"service_email_from_change",
"service_email_from_change_confirm",
"service_preview_email_branding",
"service_preview_letter_branding",
"service_set_auth_type",
"service_set_channel",
"service_set_contact_link",
"service_set_email_branding",
"service_set_inbound_number",
"service_set_inbound_sms",
"service_set_international_sms",
"service_set_letter_contact_block",
"service_set_letters",
"service_set_reply_to_email",
"service_set_sms_prefix",
"service_settings",
"service_sms_senders",
"service_switch_upload_document",
"service_switch_count_as_live",
"service_switch_live",
"service_set_permission",
"service_verify_reply_to_address",
"service_verify_reply_to_address_updates",
"services_or_dashboard",
"set_message_limit",
"set_free_sms_allowance",
"service_set_letter_branding",
"set_lang",
"set_sender",
"set_template_sender",
"show_accounts_or_dashboard",
"sign_in",
"sign_out",
"start_job",
"start_tour",
"activity",
"activity_download",
"styleguide",
"submit_request_to_go_live",
"suspend_service",
"temp_service_history",
"template_history",
"template_usage",
"terms",
"trial_services",
"two_factor_sms_sent",
"two_factor_email_sent",
"update_email_branding",
"update_letter_branding",
"uploads",
"usage",
"usage_for_all_services",
"user_information",
"user_profile",
"user_profile_authenticate_security_keys",
"user_profile_complete_security_keys",
"user_profile_validate_security_keys",
"user_profile_add_security_keys",
"user_profile_security_keys",
"user_profile_security_keys_confirm_delete",
"user_profile_email",
"user_profile_email_authenticate",
"user_profile_email_confirm",
"user_profile_mobile_number",
"user_profile_mobile_number_authenticate",
"user_profile_mobile_number_confirm",
"user_profile_name",
"user_profile_password",
"user_profile_disable_platform_admin_view",
"verify",
"verify_email",
"verify_mobile",
"view_job",
"view_job_csv",
"view_job_updates",
"view_jobs",
"view_letter_notification_as_preview",
"view_letter_template_preview",
"view_notification",
"view_notification_updates",
"view_notifications",
"view_notifications_csv",
"view_provider",
"view_providers",
"view_template",
"view_template_version",
"view_template_version_preview",
"view_template_versions",
"why-notify",
"safelist",
"get_template_data",
"block_user",
"unblock_user",
"service_sending_domain",
"welcome",
}
| 30.526699
| 128
| 0.57637
|
f385e56e9ef6a4355d833a7d21dfc036ff777f6b
| 1,092
|
py
|
Python
|
tsengine/util/operations.py
|
ccccxjin/TsEngine
|
5f8deed436eb9756be40f78a7bf52be9e910b501
|
[
"MIT"
] | 1
|
2020-07-10T09:11:38.000Z
|
2020-07-10T09:11:38.000Z
|
tsengine/util/operations.py
|
ccccxjin/tsengine
|
5f8deed436eb9756be40f78a7bf52be9e910b501
|
[
"MIT"
] | null | null | null |
tsengine/util/operations.py
|
ccccxjin/tsengine
|
5f8deed436eb9756be40f78a7bf52be9e910b501
|
[
"MIT"
] | null | null | null |
from ._collections import _escape_mapper_args
class F:
__slots__ = 'name'
def __init__(self, name):
self.name = name
def __lt__(self, other):
other = _escape_mapper_args(other)
return '%s < %s' % (self.name, other)
def __le__(self, other):
other = _escape_mapper_args(other)
return '%s <= %s' % (self.name, other)
def __gt__(self, other):
other = _escape_mapper_args(other)
return '%s > %s' % (self.name, other)
def __ge__(self, other):
other = _escape_mapper_args(other)
return '%s >= %s' % (self.name, other)
def __eq__(self, other):
other = _escape_mapper_args(other)
return '%s = %s' % (self.name, other)
def __ne__(self, other):
other = _escape_mapper_args(other)
return '%s != %s' % (self.name, other)
def like(self, other):
other = _escape_mapper_args(other)
return "%s like '%s' " % (self.name, other)
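# Usage sketch (added, not from the original source): F('age') > 18 is intended to
# render a clause like 'age > 18', and and_(F('age') > 18, F('city') == 'NY') joins
# clauses with ' and '; the exact literal form depends on what _escape_mapper_args
# returns for each value.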
f = F
def or_(*args):
return '(' + ' or '.join(args) + ')'
def and_(*args):
return ' and '.join(args)
| 22.75
| 51
| 0.572344
|
3305a83dae6e4396b85627393c3d99118eeff877
| 34,682
|
py
|
Python
|
electrumx/server/block_processor.py
|
commerceblock/cb-electrum-server
|
c1d5aaf32790e4679bec8b640cdbbc15bc6a9b27
|
[
"MIT"
] | 1
|
2019-06-19T23:36:28.000Z
|
2019-06-19T23:36:28.000Z
|
electrumx/server/block_processor.py
|
commerceblock/cb-electrum-server
|
c1d5aaf32790e4679bec8b640cdbbc15bc6a9b27
|
[
"MIT"
] | 5
|
2018-11-16T14:06:30.000Z
|
2019-08-30T14:05:08.000Z
|
electrumx/server/block_processor.py
|
commerceblock/cb-electrum-server
|
c1d5aaf32790e4679bec8b640cdbbc15bc6a9b27
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2016-2017, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Block prefetcher and chain processor.'''
import array
import asyncio
from struct import pack, unpack
import time
from functools import partial
from aiorpcx import TaskGroup, run_in_thread
import electrumx
from electrumx.server.daemon import DaemonError
from electrumx.lib.hash import hash_to_hex_str, HASHX_LEN
from electrumx.lib.merkle import Merkle, MerkleCache
from electrumx.lib.util import chunks, formatted_time, class_logger, unpack_uint64_from
import electrumx.server.db
class Prefetcher(object):
'''Prefetches blocks (in the forward direction only).'''
def __init__(self, daemon, coin, blocks_event):
self.logger = class_logger(__name__, self.__class__.__name__)
self.daemon = daemon
self.coin = coin
self.blocks_event = blocks_event
self.blocks = []
self.caught_up = False
# Access to fetched_height should be protected by the semaphore
self.fetched_height = None
self.semaphore = asyncio.Semaphore()
self.refill_event = asyncio.Event()
# The prefetched block cache size. The min cache size has
# little effect on sync time.
self.cache_size = 0
self.min_cache_size = 10 * 1024 * 1024
# This makes the first fetch be 10 blocks
self.ave_size = self.min_cache_size // 10
async def main_loop(self, bp_height):
'''Loop forever polling for more blocks.'''
await self.reset_height(bp_height)
while True:
try:
# Sleep a while if there is nothing to prefetch
await self.refill_event.wait()
if not await self._prefetch_blocks():
await asyncio.sleep(5)
except DaemonError as e:
self.logger.info('ignoring daemon error: {}'.format(e))
def get_prefetched_blocks(self):
'''Called by block processor when it is processing queued blocks.'''
blocks = self.blocks
self.blocks = []
self.cache_size = 0
self.refill_event.set()
return blocks
async def reset_height(self, height):
'''Reset to prefetch blocks from the block processor's height.
Used in blockchain reorganisations. This coroutine can be
called asynchronously to the _prefetch_blocks coroutine so we
must synchronize with a semaphore.
'''
async with self.semaphore:
self.blocks.clear()
self.cache_size = 0
self.fetched_height = height
self.refill_event.set()
daemon_height = await self.daemon.height()
behind = daemon_height - height
if behind > 0:
self.logger.info('catching up to daemon height {:,d} '
'({:,d} blocks behind)'
.format(daemon_height, behind))
else:
self.logger.info('caught up to daemon height {:,d}'
.format(daemon_height))
async def _prefetch_blocks(self):
'''Prefetch some blocks and put them on the queue.
Repeats until the queue is full or caught up.
'''
daemon = self.daemon
daemon_height = await daemon.height()
async with self.semaphore:
while self.cache_size < self.min_cache_size:
# Try and catch up all blocks but limit to room in cache.
# Constrain fetch count to between 0 and 500 regardless;
# testnet can be lumpy.
cache_room = self.min_cache_size // self.ave_size
count = min(daemon_height - self.fetched_height, cache_room)
count = min(500, max(count, 0))
if not count:
self.caught_up = True
return False
first = self.fetched_height + 1
hex_hashes = await daemon.block_hex_hashes(first, count)
if self.caught_up:
self.logger.info('new block height {:,d} hash {}'
.format(first + count-1, hex_hashes[-1]))
blocks = await daemon.raw_blocks(hex_hashes)
assert count == len(blocks)
# Special handling for genesis block
if first == 0:
blocks[0] = self.coin.genesis_block(blocks[0])
self.logger.info('verified genesis block with hash {}'
.format(hex_hashes[0]))
# Update our recent average block size estimate
size = sum(len(block) for block in blocks)
if count >= 10:
self.ave_size = size // count
else:
self.ave_size = (size + (10 - count) * self.ave_size) // 10
self.blocks.extend(blocks)
self.cache_size += size
self.fetched_height += count
self.blocks_event.set()
self.refill_event.clear()
return True
class HeaderSource(object):
def __init__(self, db):
self.hashes = db.fs_block_hashes
class ChainError(Exception):
'''Raised on error processing blocks.'''
class BlockProcessor(electrumx.server.db.DB):
'''Process blocks and update the DB state to match.
Employ a prefetcher to prefetch blocks in batches for processing.
Coordinate backing up in case of chain reorganisations.
'''
def __init__(self, env, daemon, notifications):
super().__init__(env)
self.daemon = daemon
self.notifications = notifications
self.blocks_event = asyncio.Event()
self.prefetcher = Prefetcher(daemon, env.coin, self.blocks_event)
# Meta
self.cache_MB = env.cache_MB
self.next_cache_check = 0
self.last_flush = time.time()
self.touched = set()
self.reorg_count = 0
# Header merkle cache
self.merkle = Merkle()
self.header_mc = None
# Caches of unflushed items.
self.headers = []
self.tx_hashes = []
self.undo_infos = []
# UTXO cache
self.utxo_cache = {}
self.db_deletes = []
# If the lock is successfully acquired, in-memory chain state
# is consistent with self.height
self.state_lock = asyncio.Lock()
async def run_in_thread_shielded(self, func, *args):
async with self.state_lock:
return await asyncio.shield(run_in_thread(func, *args))
async def check_and_advance_blocks(self, raw_blocks):
'''Process the list of raw blocks passed. Detects and handles
reorgs.
'''
if not raw_blocks:
return
first = self.height + 1
blocks = [self.coin.block(raw_block, first + n)
for n, raw_block in enumerate(raw_blocks)]
headers = [block.header for block in blocks]
hprevs = [self.coin.header_prevhash(h) for h in headers]
chain = [self.tip] + [self.coin.header_hash(h) for h in headers[:-1]]
if hprevs == chain:
await self.run_in_thread_shielded(self.advance_blocks, blocks)
if self._caught_up_event.is_set():
await self.notifications.on_block(self.touched, self.height)
self.touched = set()
elif hprevs[0] != chain[0]:
await self.reorg_chain()
else:
# It is probably possible but extremely rare that what
# bitcoind returns doesn't form a chain because it
# reorg-ed the chain as it was processing the batched
# block hash requests. Should this happen it's simplest
# just to reset the prefetcher and try again.
self.logger.warning('daemon blocks do not form a chain; '
'resetting the prefetcher')
await self.prefetcher.reset_height(self.height)
async def reorg_chain(self, count=None):
'''Handle a chain reorganisation.
Count is the number of blocks to simulate a reorg, or None for
a real reorg.'''
if count is None:
self.logger.info('chain reorg detected')
else:
self.logger.info(f'faking a reorg of {count:,d} blocks')
await run_in_thread(self.flush, True)
async def get_raw_blocks(last_height, hex_hashes):
heights = range(last_height, last_height - len(hex_hashes), -1)
try:
blocks = [self.read_raw_block(height) for height in heights]
self.logger.info(f'read {len(blocks)} blocks from disk')
return blocks
except Exception:
return await self.daemon.raw_blocks(hex_hashes)
start, last, hashes = await self.reorg_hashes(count)
# Reverse and convert to hex strings.
hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)]
for hex_hashes in chunks(hashes, 50):
raw_blocks = await get_raw_blocks(last, hex_hashes)
await self.run_in_thread_shielded(self.backup_blocks, raw_blocks)
last -= len(raw_blocks)
# Truncate header_mc: header count is 1 more than the height.
# Note header_mc is None if the reorg happens at startup.
if self.header_mc:
self.header_mc.truncate(self.height + 1)
await self.prefetcher.reset_height(self.height)
async def reorg_hashes(self, count):
'''Return a pair (start, last, hashes) of blocks to back up during a
reorg.
The hashes are returned in order of increasing height. Start
is the height of the first hash, last of the last.
'''
start, count = await self.calc_reorg_range(count)
last = start + count - 1
s = '' if count == 1 else 's'
self.logger.info(f'chain was reorganised replacing {count:,d} '
f'block{s} at heights {start:,d}-{last:,d}')
return start, last, self.fs_block_hashes(start, count)
async def calc_reorg_range(self, count):
'''Calculate the reorg range'''
def diff_pos(hashes1, hashes2):
'''Returns the index of the first difference in the hash lists.
If both lists match returns their length.'''
for n, (hash1, hash2) in enumerate(zip(hashes1, hashes2)):
if hash1 != hash2:
return n
return len(hashes)
if count is None:
# A real reorg
start = self.height - 1
count = 1
while start > 0:
hashes = self.fs_block_hashes(start, count)
hex_hashes = [hash_to_hex_str(hash) for hash in hashes]
d_hex_hashes = await self.daemon.block_hex_hashes(start, count)
n = diff_pos(hex_hashes, d_hex_hashes)
if n > 0:
start += n
break
count = min(count * 2, start)
start -= count
count = (self.height - start) + 1
else:
start = (self.height - count) + 1
return start, count
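        # Illustrative trace (hypothetical heights): with self.height == 1000
        # and a fork 3 blocks back (997 still matching the daemon), the loop
        # checks the window (start=999, count=1) -> no common prefix, then
        # (start=997, count=2) -> the hash at 997 matches so n == 1, start
        # becomes 998, and (1000 - 998) + 1 == 3 blocks are backed up.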
def flush_state(self, batch):
'''Flush chain state to the batch.'''
now = time.time()
self.wall_time += now - self.last_flush
self.last_flush = now
self.last_flush_tx_count = self.tx_count
self.write_utxo_state(batch)
def assert_flushed(self):
'''Asserts state is fully flushed.'''
assert self.tx_count == self.fs_tx_count == self.db_tx_count
assert self.height == self.fs_height == self.db_height
assert not self.undo_infos
assert not self.utxo_cache
assert not self.db_deletes
self.history.assert_flushed()
def flush(self, flush_utxos=False):
'''Flush out cached state.
History is always flushed. UTXOs are flushed if flush_utxos.'''
if self.height == self.db_height:
self.assert_flushed()
return
flush_start = time.time()
last_flush = self.last_flush
tx_diff = self.tx_count - self.last_flush_tx_count
# Flush to file system
self.fs_flush()
fs_end = time.time()
if self.utxo_db.for_sync:
self.logger.info('flushed to FS in {:.1f}s'
.format(fs_end - flush_start))
# History next - it's fast and frees memory
hashX_count = self.history.flush()
if self.utxo_db.for_sync:
self.logger.info('flushed history in {:.1f}s for {:,d} addrs'
.format(time.time() - fs_end, hashX_count))
# Flush state last as it reads the wall time.
with self.utxo_db.write_batch() as batch:
if flush_utxos:
self.flush_utxos(batch)
self.flush_state(batch)
# Update and put the wall time again - otherwise we drop the
# time it took to commit the batch
self.flush_state(self.utxo_db)
self.logger.info('flush #{:,d} took {:.1f}s. Height {:,d} txs: {:,d}'
.format(self.history.flush_count,
self.last_flush - flush_start,
self.height, self.tx_count))
# Catch-up stats
if self.utxo_db.for_sync:
tx_per_sec = int(self.tx_count / self.wall_time)
this_tx_per_sec = 1 + int(tx_diff / (self.last_flush - last_flush))
self.logger.info('tx/sec since genesis: {:,d}, '
'since last flush: {:,d}'
.format(tx_per_sec, this_tx_per_sec))
daemon_height = self.daemon.cached_height()
if self.height > self.coin.TX_COUNT_HEIGHT:
tx_est = (daemon_height - self.height) * self.coin.TX_PER_BLOCK
else:
tx_est = ((daemon_height - self.coin.TX_COUNT_HEIGHT)
* self.coin.TX_PER_BLOCK
+ (self.coin.TX_COUNT - self.tx_count))
# Damp the enthusiasm
realism = 2.0 - 0.9 * self.height / self.coin.TX_COUNT_HEIGHT
tx_est *= max(realism, 1.0)
self.logger.info('sync time: {} ETA: {}'
.format(formatted_time(self.wall_time),
formatted_time(tx_est / this_tx_per_sec)))
def fs_flush(self):
'''Flush the things stored on the filesystem.'''
assert self.fs_height + len(self.headers) == self.height
        assert self.tx_count == (self.tx_counts[-1] if self.tx_counts else 0)
self.fs_update(self.fs_height, self.headers, self.tx_hashes)
self.fs_height = self.height
self.fs_tx_count = self.tx_count
self.tx_hashes = []
self.headers = []
def backup_flush(self):
'''Like flush() but when backing up. All UTXOs are flushed.
        The hashXs touched by backing up are taken from self.touched and
        searched for history entries to remove after the backup height.
'''
assert self.height < self.db_height
self.history.assert_flushed()
flush_start = time.time()
# Backup FS (just move the pointers back)
self.fs_height = self.height
self.fs_tx_count = self.tx_count
assert not self.headers
assert not self.tx_hashes
# Backup history. self.touched can include other addresses
# which is harmless, but remove None.
self.touched.discard(None)
nremoves = self.history.backup(self.touched, self.tx_count)
self.logger.info('backing up removed {:,d} history entries'
.format(nremoves))
with self.utxo_db.write_batch() as batch:
# Flush state last as it reads the wall time.
self.flush_utxos(batch)
self.flush_state(batch)
self.logger.info('backup flush #{:,d} took {:.1f}s. '
'Height {:,d} txs: {:,d}'
.format(self.history.flush_count,
self.last_flush - flush_start,
self.height, self.tx_count))
def check_cache_size(self):
'''Flush a cache if it gets too big.'''
# Good average estimates based on traversal of subobjects and
# requesting size from Python (see deep_getsizeof).
one_MB = 1000*1000
utxo_cache_size = len(self.utxo_cache) * 205
db_deletes_size = len(self.db_deletes) * 57
hist_cache_size = self.history.unflushed_memsize()
# Roughly ntxs * 32 + nblocks * 42
tx_hash_size = ((self.tx_count - self.fs_tx_count) * 32
+ (self.height - self.fs_height) * 42)
utxo_MB = (db_deletes_size + utxo_cache_size) // one_MB
hist_MB = (hist_cache_size + tx_hash_size) // one_MB
self.logger.info('our height: {:,d} daemon: {:,d} '
'UTXOs {:,d}MB hist {:,d}MB'
.format(self.height, self.daemon.cached_height(),
utxo_MB, hist_MB))
# Flush history if it takes up over 20% of cache memory.
# Flush UTXOs once they take up 80% of cache memory.
if utxo_MB + hist_MB >= self.cache_MB or hist_MB >= self.cache_MB // 5:
self.flush(utxo_MB >= self.cache_MB * 4 // 5)
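        # Rough worked example (assumed figures): with cache_MB == 1200,
        # 5,000,000 cached UTXOs (~5e6 * 205 / 1e6 ~= 1025 MB) and ~300 MB of
        # unflushed history, 1025 + 300 >= 1200 triggers a flush, and UTXOs
        # are flushed too because 1025 >= 1200 * 4 // 5 == 960.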
def advance_blocks(self, blocks):
'''Synchronously advance the blocks.
It is already verified they correctly connect onto our tip.
'''
start = time.time()
min_height = self.min_undo_height(self.daemon.cached_height())
height = self.height
for block in blocks:
height += 1
undo_info = self.advance_txs(block.transactions)
if height >= min_height:
self.undo_infos.append((undo_info, height))
self.write_raw_block(block.raw, height)
headers = [block.header for block in blocks]
self.height = height
self.headers.extend(headers)
self.tip = self.coin.header_hash(headers[-1])
# If caught up, flush everything as client queries are
# performed on the DB.
if self._caught_up_event.is_set():
self.flush(True)
else:
if time.time() > self.next_cache_check:
self.check_cache_size()
self.next_cache_check = time.time() + 30
if not self.first_sync:
s = '' if len(blocks) == 1 else 's'
self.logger.info('processed {:,d} block{} in {:.1f}s'
.format(len(blocks), s,
time.time() - start))
def _get_txout_value(self, txout):
assert not self.coin.EXTENDED_VOUT
        return pack('<Q', txout.value)
def advance_txs(self, txs):
self.tx_hashes.append(b''.join(tx_hash for tx, tx_hash in txs))
# Use local vars for speed in the loops
undo_info = []
tx_num = self.tx_count
script_hashX = self.coin.hashX_from_script
s_pack = pack
put_utxo = self.utxo_cache.__setitem__
spend_utxo = self.spend_utxo
undo_info_append = undo_info.append
update_touched = self.touched.update
hashXs_by_tx = []
append_hashXs = hashXs_by_tx.append
for tx, tx_hash in txs:
hashXs = []
append_hashX = hashXs.append
tx_numb = s_pack('<I', tx_num)
# Spend the inputs
if not tx.is_coinbase:
for txin in tx.inputs:
cache_value = spend_utxo(txin.prev_hash, txin.prev_idx)
undo_info_append(cache_value)
append_hashX(cache_value[:-12])
# Add the new UTXOs
for idx, txout in enumerate(tx.outputs):
# Get the hashX. Ignore unspendable outputs
hashX = script_hashX(txout.pk_script)
if hashX:
append_hashX(hashX)
put_utxo(tx_hash + s_pack('<H', idx),
hashX + tx_numb + self._get_txout_value(txout))
append_hashXs(hashXs)
update_touched(hashXs)
tx_num += 1
self.history.add_unflushed(hashXs_by_tx, self.tx_count)
self.tx_count = tx_num
self.tx_counts.append(tx_num)
return undo_info
def backup_blocks(self, raw_blocks):
'''Backup the raw blocks and flush.
        The blocks should be in order of decreasing height, starting at
        self.height. A flush is performed once the blocks are backed up.
'''
self.assert_flushed()
assert self.height >= len(raw_blocks)
coin = self.coin
for raw_block in raw_blocks:
# Check and update self.tip
block = coin.block(raw_block, self.height)
header_hash = coin.header_hash(block.header)
if header_hash != self.tip:
raise ChainError('backup block {} not tip {} at height {:,d}'
.format(hash_to_hex_str(header_hash),
hash_to_hex_str(self.tip),
self.height))
self.tip = coin.header_prevhash(block.header)
self.backup_txs(block.transactions)
self.height -= 1
self.tx_counts.pop()
self.logger.info('backed up to height {:,d}'.format(self.height))
self.backup_flush()
def backup_txs(self, txs):
# Prevout values, in order down the block (coinbase first if present)
# undo_info is in reverse block order
undo_info = self.read_undo_info(self.height)
if undo_info is None:
raise ChainError('no undo information found for height {:,d}'
.format(self.height))
n = len(undo_info)
# Use local vars for speed in the loops
s_pack = pack
put_utxo = self.utxo_cache.__setitem__
spend_utxo = self.spend_utxo
script_hashX = self.coin.hashX_from_script
touched = self.touched
undo_entry_len = (44 if self.coin.EXTENDED_VOUT else 12) + HASHX_LEN
for tx, tx_hash in reversed(txs):
for idx, txout in enumerate(tx.outputs):
# Spend the TX outputs. Be careful with unspendable
# outputs - we didn't save those in the first place.
hashX = script_hashX(txout.pk_script)
if hashX:
cache_value = spend_utxo(tx_hash, idx)
touched.add(cache_value[:-44 if self.coin.EXTENDED_VOUT else -12])
# Restore the inputs
if not tx.is_coinbase:
for txin in reversed(tx.inputs):
n -= undo_entry_len
undo_item = undo_info[n:n + undo_entry_len]
put_utxo(txin.prev_hash + s_pack('<H', txin.prev_idx),
undo_item)
touched.add(undo_item[:-44 if self.coin.EXTENDED_VOUT else -12])
assert n == 0
self.tx_count -= len(txs)
'''An in-memory UTXO cache, representing all changes to UTXO state
since the last DB flush.
We want to store millions of these in memory for optimal
performance during initial sync, because then it is possible to
spend UTXOs without ever going to the database (other than as an
entry in the address history, and there is only one such entry per
TX not per UTXO). So store them in a Python dictionary with
binary keys and values.
Key: TX_HASH + TX_IDX (32 + 2 = 34 bytes)
Value: HASHX + TX_NUM + VALUE (11 + 4 + 8 = 23 bytes)
That's 57 bytes of raw data in-memory. Python dictionary overhead
means each entry actually uses about 205 bytes of memory. So
almost 5 million UTXOs can fit in 1GB of RAM. There are
approximately 42 million UTXOs on bitcoin mainnet at height
433,000.
Semantics:
add: Add it to the cache dictionary.
spend: Remove it if in the cache dictionary. Otherwise it's
been flushed to the DB. Each UTXO is responsible for two
entries in the DB. Mark them for deletion in the next
cache flush.
The UTXO database format has to be able to do two things efficiently:
1. Given an address be able to list its UTXOs and their values
so its balance can be efficiently computed.
2. When processing transactions, for each prevout spent - a (tx_hash,
idx) pair - we have to be able to remove it from the DB. To send
notifications to clients we also need to know any address it paid
to.
To this end we maintain two "tables", one for each point above:
1. Key: b'u' + address_hashX + tx_idx + tx_num
Value: the UTXO value as a 64-bit unsigned integer
2. Key: b'h' + compressed_tx_hash + tx_idx + tx_num
Value: hashX
The compressed tx hash is just the first few bytes of the hash of
the tx in which the UTXO was created. As this is not unique there
will be potential collisions so tx_num is also in the key. When
looking up a UTXO the prefix space of the compressed hash needs to
be searched and resolved if necessary with the tx_num. The
collision rate is low (<0.1%).
'''
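    # Illustrative key layout for a single UTXO (hypothetical values,
    # HASHX_LEN == 11; mirrors _flush_cached_utxo below):
    #     hashX  = script_hashX(pk_script)                  # 11 bytes
    #     suffix = pack('<H', tx_idx) + pack('<I', tx_num)  # 2 + 4 bytes
    #     b'h' + tx_hash[:4] + suffix  ->  hashX
    #     b'u' + hashX       + suffix  ->  pack('<Q', value)
    # spend_utxo() resolves collisions on the 4-byte compressed tx hash by
    # checking tx_num against fs_tx_hash().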
def spend_utxo(self, tx_hash, tx_idx):
        '''Spend a UTXO and return its cached value (hashX + tx_num + value).
If the UTXO is not in the cache it must be on disk. We store
all UTXOs so not finding one indicates a logic error or DB
corruption.
'''
# Fast track is it being in the cache
idx_packed = pack('<H', tx_idx)
cache_value = self.utxo_cache.pop(tx_hash + idx_packed, None)
if cache_value:
return cache_value
# Spend it from the DB.
# Key: b'h' + compressed_tx_hash + tx_idx + tx_num
# Value: hashX
prefix = b'h' + tx_hash[:4] + idx_packed
candidates = {db_key: hashX for db_key, hashX
in self.utxo_db.iterator(prefix=prefix)}
for hdb_key, hashX in candidates.items():
tx_num_packed = hdb_key[-4:]
if len(candidates) > 1:
tx_num, = unpack('<I', tx_num_packed)
hash, height = self.fs_tx_hash(tx_num)
if hash != tx_hash:
assert hash is not None # Should always be found
continue
# Key: b'u' + address_hashX + tx_idx + tx_num
# Value: the UTXO value as a 64-bit unsigned integer
udb_key = b'u' + hashX + hdb_key[-6:]
utxo_value_packed = self.utxo_db.get(udb_key)
if utxo_value_packed:
# Remove both entries for this UTXO
self.db_deletes.append(hdb_key)
self.db_deletes.append(udb_key)
return hashX + tx_num_packed + utxo_value_packed
raise ChainError('UTXO {} / {:,d} not found in "h" table'
.format(hash_to_hex_str(tx_hash), tx_idx))
def _flush_cached_utxo(self, batch_put, cache_key, cache_value):
assert not self.coin.EXTENDED_VOUT
hashX = cache_value[:-12]
suffix = cache_key[-2:] + cache_value[-12:-8]
batch_put(b'h' + cache_key[:4] + suffix, hashX)
batch_put(b'u' + hashX + suffix, cache_value[-8:])
def flush_utxos(self, batch):
'''Flush the cached DB writes and UTXO set to the batch.'''
# Care is needed because the writes generated by flushing the
# UTXO state may have keys in common with our write cache or
# may be in the DB already.
flush_start = time.time()
delete_count = len(self.db_deletes) // 2
utxo_cache_len = len(self.utxo_cache)
# Spends
batch_delete = batch.delete
for key in sorted(self.db_deletes):
batch_delete(key)
self.db_deletes = []
# New UTXOs
batch_put = batch.put
for cache_key, cache_value in self.utxo_cache.items():
# suffix = tx_idx + tx_num
self._flush_cached_utxo(batch_put, cache_key, cache_value)
self.utxo_cache = {}
# New undo information
self.flush_undo_infos(batch_put, self.undo_infos)
self.undo_infos = []
if self.utxo_db.for_sync:
self.logger.info('flushed {:,d} blocks with {:,d} txs, {:,d} UTXO '
'adds, {:,d} spends in {:.1f}s, committing...'
.format(self.height - self.db_height,
self.tx_count - self.db_tx_count,
utxo_cache_len, delete_count,
time.time() - flush_start))
self.utxo_flush_count = self.history.flush_count
self.db_tx_count = self.tx_count
self.db_height = self.height
self.db_tip = self.tip
async def _process_prefetched_blocks(self):
'''Loop forever processing blocks as they arrive.'''
while True:
if self.height == self.daemon.cached_height():
if not self._caught_up_event.is_set():
await self._first_caught_up()
self._caught_up_event.set()
await self.blocks_event.wait()
self.blocks_event.clear()
if self.reorg_count:
await self.reorg_chain(self.reorg_count)
self.reorg_count = 0
else:
blocks = self.prefetcher.get_prefetched_blocks()
await self.check_and_advance_blocks(blocks)
async def _first_caught_up(self):
self.logger.info(f'caught up to height {self.height}')
# Flush everything but with first_sync->False state.
first_sync = self.first_sync
self.first_sync = False
self.flush(True)
if first_sync:
self.logger.info(f'{electrumx.version} synced to '
f'height {self.height:,d}')
# Initialise the notification framework
await self.notifications.on_block(set(), self.height)
# Reopen for serving
await self.open_for_serving()
# Populate the header merkle cache
length = max(1, self.height - self.env.reorg_limit)
self.header_mc = MerkleCache(self.merkle, HeaderSource(self), length)
self.logger.info('populated header merkle cache')
async def _first_open_dbs(self):
await self.open_for_sync()
# An incomplete compaction needs to be cancelled otherwise
# restarting it will corrupt the history
self.history.cancel_compaction()
# These are our state as we move ahead of DB state
self.fs_height = self.db_height
self.fs_tx_count = self.db_tx_count
self.height = self.db_height
self.tip = self.db_tip
self.tx_count = self.db_tx_count
self.last_flush_tx_count = self.tx_count
if self.utxo_db.for_sync:
self.logger.info(f'flushing DB cache at {self.cache_MB:,d} MB')
# --- External API
async def fetch_and_process_blocks(self, caught_up_event):
'''Fetch, process and index blocks from the daemon.
Sets caught_up_event when first caught up. Flushes to disk
and shuts down cleanly if cancelled.
This is mainly because if, during initial sync ElectrumX is
asked to shut down when a large number of blocks have been
processed but not written to disk, it should write those to
disk before exiting, as otherwise a significant amount of work
could be lost.
'''
self._caught_up_event = caught_up_event
async with TaskGroup() as group:
await group.spawn(self._first_open_dbs())
# Ensure cached_height is set
await group.spawn(self.daemon.height())
try:
async with TaskGroup() as group:
await group.spawn(self.prefetcher.main_loop(self.height))
await group.spawn(self._process_prefetched_blocks())
finally:
async with self.state_lock:
# Shut down block processing
self.logger.info('flushing to DB for a clean shutdown...')
self.flush(True)
def force_chain_reorg(self, count):
'''Force a reorg of the given number of blocks.
Returns True if a reorg is queued, false if not caught up.
'''
if self._caught_up_event.is_set():
self.reorg_count = count
self.blocks_event.set()
return True
return False
class OceanBlockProcessor(BlockProcessor):
''' UTXO changes from parent block processor
UTXO cache
Key: TX_HASH + TX_IDX (32 + 2 = 34 bytes)
Value: HASHX + TX_NUM + ASSET_ID + VALUE (11 + 4 + 32 + 8 = 55 bytes)
To this end we maintain two "tables", one for each point above:
1. Key: b'u' + address_hashX + tx_idx + tx_num
Value: asset_id (32 byte) + utxo value (64 bit int)
2. Key: b'h' + compressed_tx_hash + tx_idx + tx_num
Value: hashX
'''
def _get_txout_value(self, txout):
assert self.coin.EXTENDED_VOUT
conf_value_s = txout.value[1:]
conf_value_v = txout.value[0]
if conf_value_v == 1 or conf_value_v == 0xff:
int_value, = unpack_uint64_from(conf_value_s[::-1], 0)
return txout.asset[1:] + pack('<Q', int_value)
else:
raise Exception("Confidential transactions are not yet handled")
def _flush_cached_utxo(self, batch_put, cache_key, cache_value):
assert self.coin.EXTENDED_VOUT
hashX = cache_value[:-44]
suffix = cache_key[-2:] + cache_value[-44:-40]
batch_put(b'h' + cache_key[:4] + suffix, hashX)
batch_put(b'u' + hashX + suffix, cache_value[-40:])
class DecredBlockProcessor(BlockProcessor):
async def calc_reorg_range(self, count):
start, count = super().calc_reorg_range(count)
if start > 0:
# A reorg in Decred can invalidate the previous block
start -= 1
count += 1
return start, count
| 39.056306
| 87
| 0.587394
|
50b70938957dfc575f78127156d86c336d176e5f
| 562
|
py
|
Python
|
src/icolos/utils/enums/flow_control_enums.py
|
jharrymoore/Icolos
|
c60cc00c34208ab7011d41d52a74651763673e7a
|
[
"Apache-2.0"
] | null | null | null |
src/icolos/utils/enums/flow_control_enums.py
|
jharrymoore/Icolos
|
c60cc00c34208ab7011d41d52a74651763673e7a
|
[
"Apache-2.0"
] | null | null | null |
src/icolos/utils/enums/flow_control_enums.py
|
jharrymoore/Icolos
|
c60cc00c34208ab7011d41d52a74651763673e7a
|
[
"Apache-2.0"
] | null | null | null |
from icolos.core.workflow_steps.prediction.active_learning import StepActiveLearning
from icolos.utils.enums.step_enums import StepBaseEnum
from icolos.core.flow_control.iterator import StepIterator
_SBE = StepBaseEnum
class FlowControlInitializationEnum:
# These steps are responsible for initializing other steps as part of their execution
# Keep these separate to the main pool of steps to avoid circular imports
FLOW_CONTROL_INIT_DICT = {
_SBE.STEP_ITERATOR: StepIterator,
_SBE.STEP_ACTIVE_LEARNING: StepActiveLearning,
}
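# A minimal lookup sketch (illustrative only; 'step_type' and 'step_conf' are
# hypothetical names, not part of Icolos):
#     step_cls = FlowControlInitializationEnum.FLOW_CONTROL_INIT_DICT.get(step_type)
#     if step_cls is not None:
#         step = step_cls(**step_conf)  # e.g. StepIterator for _SBE.STEP_ITERATOR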
| 35.125
| 89
| 0.800712
|
e48aa6f051f6f9bbefe6b37382433e4be1f23131
| 7,265
|
py
|
Python
|
userbot/modules/globalban.py
|
qmuensa/QB-USERBOT
|
5a6c098e387755256801b60c1c57f1a404edc671
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2022-02-03T15:00:26.000Z
|
2022-02-03T15:00:26.000Z
|
userbot/modules/globalban.py
|
qmuensa/QB-USERBOT
|
5a6c098e387755256801b60c1c57f1a404edc671
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/globalban.py
|
qmuensa/QB-USERBOT
|
5a6c098e387755256801b60c1c57f1a404edc671
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2022-02-21T16:07:35.000Z
|
2022-02-21T16:07:35.000Z
|
# Copyright (C) 2020 Catuserbot <https://github.com/sandy1709/catuserbot>
# Ported by @mrismanaziz
# FROM Man-Userbot <https://github.com/mrismanaziz/Man-Userbot>
# t.me/SharingUserbot
import asyncio
from datetime import datetime
from io import BytesIO
from telethon import events
from telethon.errors import BadRequestError
from telethon.tl.functions.channels import EditBannedRequest
from telethon.tl.types import Channel
import userbot.modules.sql_helper.gban_sql as gban_sql
from userbot import BOTLOG_CHATID
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, DEVS, bot
from userbot.events import register
from userbot.utils import edit_or_reply, get_user_from_event, man_cmd
from .admin import BANNED_RIGHTS, UNBAN_RIGHTS
async def admin_groups(grp):
admgroups = []
async for dialog in grp.client.iter_dialogs():
entity = dialog.entity
if (
isinstance(entity, Channel)
and entity.megagroup
and (entity.creator or entity.admin_rights)
):
admgroups.append(entity.id)
return admgroups
def mentionuser(name, userid):
return f"[{name}](tg://user?id={userid})"
@man_cmd(pattern="gban(?: |$)(.*)")
@register(pattern=r"^\.cgban(?: |$)(.*)", sudo=True)
async def gban(event):
if event.fwd_from:
return
gbun = await edit_or_reply(event, "`Gbanning...`")
start = datetime.now()
user, reason = await get_user_from_event(event, gbun)
if not user:
return
if user.id == (await event.client.get_me()).id:
await gbun.edit("**Ngapain NgeGban diri sendiri Goblok 🐽**")
return
if user.id in DEVS:
await gbun.edit("**Gagal GBAN karena dia adalah Pembuat saya 🗿**")
return
if gban_sql.is_gbanned(user.id):
await gbun.edit(
f"**Si** [Jamet](tg://user?id={user.id}) **ini sudah ada di daftar gbanned**"
)
else:
gban_sql.freakgban(user.id, reason)
san = []
san = await admin_groups(event)
count = 0
fiz = len(san)
if fiz == 0:
await gbun.edit("**Anda Tidak mempunyai GC yang anda admin 🥺**")
return
await gbun.edit(
f"**initiating gban of the** [Jamet](tg://user?id={user.id}) **in** `{len(san)}` **groups**"
)
for i in range(fiz):
try:
await event.client(EditBannedRequest(san[i], user.id, BANNED_RIGHTS))
await asyncio.sleep(0.5)
count += 1
except BadRequestError:
await event.client.send_message(
BOTLOG_CHATID,
f"**Anda tidak memiliki izin Banned di :**\n**Group Chat :** `{event.chat_id}`",
)
end = datetime.now()
timetaken = (end - start).seconds
if reason:
await gbun.edit(
f"**GBanned** [{user.first_name}](tg://user?id={user.id}) **in** `{count}` **groups in** `{timetaken}` **seconds**!!\n**Reason :** `{reason}`"
)
else:
await gbun.edit(
f"**GBanned** [{user.first_name}](tg://user?id={user.id}) **in** `{count}` **groups in** `{timetaken}` **seconds**!!\n**Added to gbanlist.**"
)
@man_cmd(pattern="ungban(?: |$)(.*)")
@register(pattern=r"^\.cungban(?: |$)(.*)", sudo=True)
async def ungban(event):
if event.fwd_from:
return
ungbun = await edit_or_reply(event, "`UnGbanning...`")
start = datetime.now()
user, reason = await get_user_from_event(event, ungbun)
if not user:
return
if gban_sql.is_gbanned(user.id):
gban_sql.freakungban(user.id)
else:
await ungbun.edit(
f"**Si** [Jamet](tg://user?id={user.id}) **ini tidak ada dalam daftar gban Anda**"
)
return
san = []
san = await admin_groups(event)
count = 0
fiz = len(san)
if fiz == 0:
await ungbun.edit("**Anda Tidak mempunyai GC yang anda admin 🥺**")
return
await ungbun.edit(
f"**initiating ungban of the** [Jamet](tg://user?id={user.id}) **in** `{len(san)}` **groups**"
)
for i in range(fiz):
try:
await event.client(EditBannedRequest(san[i], user.id, UNBAN_RIGHTS))
await asyncio.sleep(0.5)
count += 1
except BadRequestError:
await event.client.send_message(
BOTLOG_CHATID,
f"**Anda tidak memiliki izin Banned di :**\n**Group Chat :** `{event.chat_id}`",
)
end = datetime.now()
timetaken = (end - start).seconds
if reason:
await ungbun.edit(
f"**Ungbanned** [{user.first_name}](tg://user?id={user.id}`) **in** `{count}` **groups in** `{timetaken}` **seconds**!!\n**Reason :** `{reason}`"
)
else:
await ungbun.edit(
f"**Ungbanned** [{user.first_name}](tg://user?id={user.id}) **in** `{count}` **groups in** `{timetaken}` **seconds**!!\n**Removed from gbanlist**"
)
@man_cmd(pattern="listgban$")
async def gablist(event):
if event.fwd_from:
return
gbanned_users = gban_sql.get_all_gbanned()
GBANNED_LIST = "**List Global Banned Saat Ini**\n"
if len(gbanned_users) > 0:
for a_user in gbanned_users:
if a_user.reason:
GBANNED_LIST += f"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id}) **Reason** `{a_user.reason}`\n"
else:
GBANNED_LIST += (
f"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id}) `No Reason`\n"
)
        if len(GBANNED_LIST) >= 4096:
with BytesIO(str.encode(GBANNED_LIST)) as fileuser:
fileuser.name = "list-gban.txt"
await event.client.send_file(
event.chat_id,
fileuser,
force_document=True,
thumb="userbot/resources/logo.jpg",
caption="**List Global Banned**",
allow_cache=False,
)
else:
GBANNED_LIST = "Belum ada Pengguna yang Di-Gban"
await edit_or_reply(event, GBANNED_LIST)
@bot.on(events.ChatAction)
async def _(event):
if event.user_joined or event.added_by:
user = await event.get_user()
chat = await event.get_chat()
if gban_sql.is_gbanned(user.id) and chat.admin_rights:
try:
await event.client.edit_permissions(
chat.id,
user.id,
view_messages=False,
)
await event.reply(
f"**#GBanned_User** Joined.\n\n** • First Name:** [{user.first_name}](tg://user?id={user.id})\n • **Action:** `Banned`"
)
except BaseException:
pass
# Ported by @mrismanaziz
# FROM Man-Userbot <https://github.com/mrismanaziz/Man-Userbot>
# t.me/SharingUserbot
CMD_HELP.update(
{
"gban": f"**Plugin : **`gban`\
\n\n • **Syntax :** `{cmd}gban` <username/id>\
\n • **Function : **Melakukan Banned Secara Global Ke Semua Grup Dimana anda Sebagai Admin.\
\n\n • **Syntax :** `{cmd}ungban` <username/id>\
\n • **Function : **Membatalkan Global Banned\
\n\n • **Syntax :** `{cmd}listgban`\
\n • **Function : **Menampilkan List Global Banned\
"
}
)
| 34.43128
| 158
| 0.577151
|
af53796fce82c63c53d91ac174159281d20f6d2a
| 620
|
py
|
Python
|
Aulas/Mundo 3/091.py
|
JoaoEmanuell/Meus-Estudos-Python
|
f6f6eeb6016919e594613785ffe7136d74241ada
|
[
"MIT"
] | 2
|
2021-07-29T18:58:02.000Z
|
2021-10-29T21:11:22.000Z
|
Aulas/Mundo 3/091.py
|
JoaoEmanuell/Meus-Estudos-Python
|
f6f6eeb6016919e594613785ffe7136d74241ada
|
[
"MIT"
] | null | null | null |
Aulas/Mundo 3/091.py
|
JoaoEmanuell/Meus-Estudos-Python
|
f6f6eeb6016919e594613785ffe7136d74241ada
|
[
"MIT"
] | null | null | null |
from random import randint
from operator import itemgetter
jogo = {'jogador1': randint(1, 6),
'jogador2': randint(1, 6),
'jogador3': randint(1, 6),
'jogador4': randint(1, 6)}
print('Valores sorteados')
rank = []
for k, v in jogo.items():
print(f'{k} tirou {v} no dado')
print('=' * 30)
rank = sorted(jogo.items(), key=itemgetter(1), reverse=True)  # itemgetter(1) makes the ordering straightforward: it takes each (player, roll) pair from the dict and sorts by the roll value
for i, v in enumerate(rank):
print(f'{i+1}° o lugar: {v[0]} com {v[1]}')
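# Illustrative run of the itemgetter-based sort above (made-up rolls):
#     sorted({'jogador1': 2, 'jogador2': 6}.items(), key=itemgetter(1), reverse=True)
#     -> [('jogador2', 6), ('jogador1', 2)]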
| 41.333333
| 215
| 0.646774
|
2732c1e924031c3533ec180c59d1ba8aba3c1fa4
| 2,980
|
py
|
Python
|
src/secondaires/navigation/equipage/volontes/virer_tribord.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | null | null | null |
src/secondaires/navigation/equipage/volontes/virer_tribord.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | null | null | null |
src/secondaires/navigation/equipage/volontes/virer_tribord.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | null | null | null |
# -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''File containing the VirerTribord volonté.'''
import re
from secondaires.navigation.equipage.volontes.virer_gouvernail import \
VirerGouvernail
class VirerTribord(VirerGouvernail):
"""Classe représentant une volonté.
Cette volonté choisit un matelot pour, si besoin, se déplacer
dans la salle du gouvernail, le prendre en main et lui demander de
virer sur tribord. Cette volonté utilise donc l'alignement relatif,
à la différence de 'virer' qui utilise l'alignement absolu.
"""
cle = "virer_tribord"
ordre_court = re.compile(r"^vt([0-9]{1,3})$", re.I)
ordre_long = re.compile(r"^virer\s+tribord\s+([0-9]{1,3})$", re.I)
def executer(self, couple):
"""Exécute la volonté."""
self.navire.equipage.retirer_controle("direction")
VirerGouvernail.executer(self, couple)
def crier_ordres(self, personnage):
"""On fait crier l'ordre au personnage."""
direction = int((self.direction - self.navire.direction.direction) % \
180)
msg = "{} s'écrie : virez de {}° tribord !".format(
personnage.distinction_audible, direction)
self.navire.envoyer(msg)
@classmethod
def extraire_arguments(cls, navire, direction):
"""Extrait les arguments de la volonté."""
direction = int(direction) % 180
return ((navire.direction.direction + direction) % 360, )
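        # Worked example (hypothetical headings): with an absolute ship
        # heading of 300 and the order "virer tribord 90", this returns
        # ((300 + 90) % 360,) == (30,), and crier_ordres then reports
        # (30 - 300) % 180 == 90 degrees to starboard.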
| 41.971831
| 79
| 0.72349
|
c2fc460528777bc979b0eb11d01deb297cf8daac
| 800
|
py
|
Python
|
ros/src/twist_controller/pid.py
|
mrhm2000/CarND-Capstone
|
804182ecfc329d9a593555eff795da1f7cf628e2
|
[
"MIT"
] | null | null | null |
ros/src/twist_controller/pid.py
|
mrhm2000/CarND-Capstone
|
804182ecfc329d9a593555eff795da1f7cf628e2
|
[
"MIT"
] | 2
|
2018-06-25T16:28:45.000Z
|
2018-07-01T14:07:40.000Z
|
ros/src/twist_controller/pid.py
|
mrhm2000/CarND-Capstone
|
804182ecfc329d9a593555eff795da1f7cf628e2
|
[
"MIT"
] | 8
|
2018-06-10T17:18:03.000Z
|
2018-07-19T18:28:07.000Z
|
MIN_NUM = float('-inf')
MAX_NUM = float('inf')
class PID(object):
def __init__(self, kp, ki, kd, min=MIN_NUM, max=MAX_NUM):
self.kp = kp
self.ki = ki
self.kd = kd
self.min = min
self.max = max
self.integral = self.last_error = 0.
def reset(self):
self.integral = 0.0
def step(self, error, sample_time):
integral = self.integral + error * sample_time
derivative = (error - self.last_error) / sample_time
result = self.kp * error + self.ki * integral + self.kd * derivative
if result > self.max:
result = self.max
elif result < self.min:
result = self.min
else:
self.integral = integral
self.last_error = error
return result
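# A minimal usage sketch; the gains and sample time below are illustrative
# assumptions, not values taken from the project.
if __name__ == '__main__':
    controller = PID(kp=0.3, ki=0.1, kd=0.02, min=-1.0, max=1.0)
    # One control step: error of 0.5 sampled every 20 ms.
    print(controller.step(error=0.5, sample_time=0.02))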
| 23.529412
| 76
| 0.55625
|
4a46d3a6597e11d7e43ec9f801e82f47f8d06b99
| 9,018
|
py
|
Python
|
salt/modules/match.py
|
fictivekin/salt
|
f4b6f815d47ab8c790028e8ddad64ee0f8bb3f00
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/match.py
|
fictivekin/salt
|
f4b6f815d47ab8c790028e8ddad64ee0f8bb3f00
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/match.py
|
fictivekin/salt
|
f4b6f815d47ab8c790028e8ddad64ee0f8bb3f00
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
The match module allows for match routines to be run and determine target specs
'''
from __future__ import absolute_import
# Import python libs
import inspect
import logging
import sys
# Import salt libs
import salt.minion
import salt.utils
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.ext.six import string_types
__func_alias__ = {
'list_': 'list'
}
log = logging.getLogger(__name__)
def compound(tgt, minion_id=None):
'''
Return True if the minion ID matches the given compound target
minion_id
Specify the minion ID to match against the target expression
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' match.compound 'L@cheese,foo and *'
'''
opts = {'grains': __grains__}
if minion_id is not None:
if not isinstance(minion_id, string_types):
minion_id = str(minion_id)
else:
minion_id = __grains__['id']
opts['id'] = minion_id
matcher = salt.minion.Matcher(opts, __salt__)
try:
return matcher.compound_match(tgt)
except Exception as exc:
log.exception(exc)
return False
def ipcidr(tgt):
'''
Return True if the minion matches the given ipcidr target
CLI Example:
.. code-block:: bash
salt '*' match.ipcidr '192.168.44.0/24'
    Pillar Example:
.. code-block:: yaml
'172.16.0.0/12':
- match: ipcidr
- nodeclass: internal
'''
matcher = salt.minion.Matcher({'grains': __grains__}, __salt__)
try:
return matcher.ipcidr_match(tgt)
except Exception as exc:
log.exception(exc)
return False
def pillar_pcre(tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Return True if the minion matches the given pillar_pcre target. The
``delimiter`` argument can be used to specify a different delimiter.
CLI Example:
.. code-block:: bash
salt '*' match.pillar_pcre 'cheese:(swiss|american)'
salt '*' match.pillar_pcre 'clone_url|https://github\\.com/.*\\.git' delimiter='|'
delimiter
Specify an alternate delimiter to use when traversing a nested dict
.. versionadded:: 2014.7.0
delim
Specify an alternate delimiter to use when traversing a nested dict
.. versionadded:: 0.16.4
.. deprecated:: 2015.8.0
'''
matcher = salt.minion.Matcher({'pillar': __pillar__}, __salt__)
try:
return matcher.pillar_pcre_match(tgt, delimiter=delimiter)
except Exception as exc:
log.exception(exc)
return False
def pillar(tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Return True if the minion matches the given pillar target. The
``delimiter`` argument can be used to specify a different delimiter.
CLI Example:
.. code-block:: bash
salt '*' match.pillar 'cheese:foo'
salt '*' match.pillar 'clone_url|https://github.com/saltstack/salt.git' delimiter='|'
delimiter
Specify an alternate delimiter to use when traversing a nested dict
.. versionadded:: 2014.7.0
delim
Specify an alternate delimiter to use when traversing a nested dict
.. versionadded:: 0.16.4
.. deprecated:: 2015.8.0
'''
matcher = salt.minion.Matcher({'pillar': __pillar__}, __salt__)
try:
return matcher.pillar_match(tgt, delimiter=delimiter)
except Exception as exc:
log.exception(exc)
return False
def data(tgt):
'''
Return True if the minion matches the given data target
CLI Example:
.. code-block:: bash
salt '*' match.data 'spam:eggs'
'''
matcher = salt.minion.Matcher(__opts__, __salt__)
try:
return matcher.data_match(tgt)
except Exception as exc:
log.exception(exc)
return False
def grain_pcre(tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Return True if the minion matches the given grain_pcre target. The
``delimiter`` argument can be used to specify a different delimiter.
CLI Example:
.. code-block:: bash
salt '*' match.grain_pcre 'os:Fedo.*'
salt '*' match.grain_pcre 'ipv6|2001:.*' delimiter='|'
delimiter
Specify an alternate delimiter to use when traversing a nested dict
.. versionadded:: 2014.7.0
delim
Specify an alternate delimiter to use when traversing a nested dict
.. versionadded:: 0.16.4
.. deprecated:: 2015.8.0
'''
matcher = salt.minion.Matcher({'grains': __grains__}, __salt__)
try:
return matcher.grain_pcre_match(tgt, delimiter=delimiter)
except Exception as exc:
log.exception(exc)
return False
def grain(tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Return True if the minion matches the given grain target. The ``delimiter``
argument can be used to specify a different delimiter.
CLI Example:
.. code-block:: bash
salt '*' match.grain 'os:Ubuntu'
salt '*' match.grain 'ipv6|2001:db8::ff00:42:8329' delimiter='|'
delimiter
Specify an alternate delimiter to use when traversing a nested dict
.. versionadded:: 2014.7.0
delim
Specify an alternate delimiter to use when traversing a nested dict
.. versionadded:: 0.16.4
.. deprecated:: 2015.8.0
'''
matcher = salt.minion.Matcher({'grains': __grains__}, __salt__)
try:
return matcher.grain_match(tgt, delimiter=delimiter)
except Exception as exc:
log.exception(exc)
return False
def list_(tgt, minion_id=None):
'''
Return True if the minion ID matches the given list target
minion_id
Specify the minion ID to match against the target expression
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' match.list 'server1,server2'
'''
if minion_id is not None:
if not isinstance(minion_id, string_types):
minion_id = str(minion_id)
else:
minion_id = __grains__['id']
matcher = salt.minion.Matcher({'id': minion_id}, __salt__)
try:
return matcher.list_match(tgt)
except Exception as exc:
log.exception(exc)
return False
def pcre(tgt, minion_id=None):
'''
Return True if the minion ID matches the given pcre target
minion_id
Specify the minion ID to match against the target expression
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' match.pcre '.*'
'''
if minion_id is not None:
if not isinstance(minion_id, string_types):
minion_id = str(minion_id)
else:
minion_id = __grains__['id']
matcher = salt.minion.Matcher({'id': minion_id}, __salt__)
try:
return matcher.pcre_match(tgt)
except Exception as exc:
log.exception(exc)
return False
def glob(tgt, minion_id=None):
'''
Return True if the minion ID matches the given glob target
minion_id
Specify the minion ID to match against the target expression
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' match.glob '*'
'''
if minion_id is not None:
if not isinstance(minion_id, string_types):
minion_id = str(minion_id)
else:
minion_id = __grains__['id']
matcher = salt.minion.Matcher({'id': minion_id}, __salt__)
try:
return matcher.glob_match(tgt)
except Exception as exc:
log.exception(exc)
return False
def filter_by(lookup,
tgt_type='compound',
minion_id=None,
expr_form=None):
'''
Return the first match in a dictionary of target patterns
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' match.filter_by '{foo*: Foo!, bar*: Bar!}' minion_id=bar03
Pillar Example:
.. code-block:: yaml
# Filter the data for the current minion into a variable:
{% set roles = salt['match.filter_by']({
'web*': ['app', 'caching'],
'db*': ['db'],
}) %}
# Make the filtered data available to Pillar:
roles: {{ roles | yaml() }}
'''
# remember to remove the expr_form argument from this function when
# performing the cleanup on this deprecation.
if expr_form is not None:
salt.utils.warn_until(
'Fluorine',
'the target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = expr_form
expr_funcs = dict(inspect.getmembers(sys.modules[__name__],
predicate=inspect.isfunction))
for key in lookup:
params = (key, minion_id) if minion_id else (key, )
if expr_funcs[tgt_type](*params):
return lookup[key]
return None
| 25.260504
| 93
| 0.626081
|
e5adbee718a71e09e83dc6cb75e0ad77f2e474c7
| 7,529
|
py
|
Python
|
pymultifracs/cumulants.py
|
MerlinDumeur/pymultifracs
|
9767ee4c34a3a39a7609f40afd6265151ba4e550
|
[
"MIT"
] | 9
|
2019-03-29T05:28:42.000Z
|
2019-12-29T12:41:15.000Z
|
pymultifracs/cumulants.py
|
MerlinDumeur/pymultifracs
|
9767ee4c34a3a39a7609f40afd6265151ba4e550
|
[
"MIT"
] | 4
|
2021-01-20T14:58:03.000Z
|
2021-03-01T11:52:09.000Z
|
pymultifracs/cumulants.py
|
MerlinDumeur/pymultifracs
|
9767ee4c34a3a39a7609f40afd6265151ba4e550
|
[
"MIT"
] | 6
|
2021-02-08T15:23:39.000Z
|
2022-03-28T13:30:46.000Z
|
"""
Authors: Omar D. Domingues <omar.darwiche-domingues@inria.fr>
Merlin Dumeur <merlin@dumeur.net>
"""
from dataclasses import dataclass, field, InitVar
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import binom as binomial_coefficient
from .utils import linear_regression, fast_power
from .multiresquantity import MultiResolutionQuantity, \
MultiResolutionQuantityBase
# from .viz import plot_multiscale
@dataclass
class Cumulants(MultiResolutionQuantityBase):
r"""
Computes and analyzes cumulants
Parameters
----------
mrq : MultiResolutionQuantity
Multi resolution quantity to analyze.
n_cumul : int
Number of cumulants to compute.
j1 : int
Lower-bound of the scale support for the linear regressions.
j2 : int
Upper-bound of the scale support for the linear regressions.
weighted: bool
Whether to used weighted linear regressions.
Attributes
----------
formalism : str
Formalism used. Can be any of 'wavelet coefs', 'wavelet leaders',
or 'wavelet p-leaders'.
nj : dict(ndarray)
Number of coefficients at scale j.
Arrays are of the shape (nrep,)
values : ndarray, shape (n_cumulants, n_scales, nrep)
:math:`C_m(j)`.
n_cumul : int
Number of computed cumulants.
j1 : int
Lower-bound of the scale support for the linear regressions.
j2 : int
Upper-bound of the scale support for the linear regressions.
weighted : bool
Whether weighted regression was performed.
m : ndarray, shape (n_cumulants,)
List of the m values (cumulants), in order presented in the value
arrays.
j : ndarray, shape (n_scales,)
List of the j values (scales), in order presented in the value arrays.
log_cumulants : ndarray, shape (n_cumulants, nrep)
:math:`(c_m)_m`, slopes of the curves :math:`j \times C_m(j)`.
var_log_cumulants : ndarray, shape (n_cumulants, nrep)
Estimates of the log-cumulants
.. warning:: var_log_cumulants
was not debugged
nrep : int
Number of realisations
"""
mrq: InitVar[MultiResolutionQuantity]
n_cumul: int
j1: int
j2: int
weighted: bool
m: np.ndarray = field(init=False)
j: np.ndarray = field(init=False)
values: np.ndarray = field(init=False)
log_cumulants: np.ndarray = field(init=False)
var_log_cumulants: np.ndarray = field(init=False)
def __post_init__(self, mrq):
self.formalism = mrq.formalism
self.nj = mrq.nj
self.nrep = mrq.nrep
self.j = np.array(list(mrq.values))
self.m = np.arange(1, self.n_cumul+1)
self.values = np.zeros((len(self.m), len(self.j), self.nrep))
self._compute(mrq)
self._compute_log_cumulants()
def _compute(self, mrq):
moments = np.zeros((len(self.m), len(self.j), self.nrep))
for ind_j, j in enumerate(self.j):
T_X_j = np.abs(mrq.values[j])
log_T_X_j = np.log(T_X_j)
for ind_m, m in enumerate(self.m):
moments[ind_m, ind_j] = np.nanmean(fast_power(log_T_X_j, m),
axis=0)
if m == 1:
self.values[ind_m, ind_j] = moments[ind_m, ind_j]
else:
aux = 0
for ind_n, n in enumerate(np.arange(1, m)):
aux += (binomial_coefficient(m-1, n-1)
* self.values[ind_n, ind_j]
* moments[ind_m-ind_n-1, ind_j])
self.values[ind_m, ind_j] = moments[ind_m, ind_j] - aux
def _compute_log_cumulants(self):
"""
Compute the log-cumulants
(angular coefficients of the curves j->log[C_p(j)])
"""
self.log_cumulants = np.zeros(((len(self.m), self.nrep)))
self.var_log_cumulants = np.zeros((len(self.m), self.nrep))
self.slope = np.zeros((len(self.m), self.nrep))
self.intercept = np.zeros((len(self.m), self.nrep))
log2_e = np.log2(np.exp(1))
x = np.tile(np.arange(self.j1, self.j2+1)[:, None],
(1, self.nrep))
if self.weighted:
nj = self.get_nj_interv(self.j1, self.j2)
else:
nj = np.ones((len(x), self.nrep))
# nj = np.tile(nj[:, None], (1, self.nrep))
ind_j1 = self.j1-1
ind_j2 = self.j2-1
for ind_m, _ in enumerate(self.m):
y = self.values[ind_m, ind_j1:ind_j2+1]
# pylint: disable=unbalanced-tuple-unpacking
slope, intercept, var_slope = \
linear_regression(x, y, nj, return_variance=True)
self.log_cumulants[ind_m] = slope*log2_e
self.var_log_cumulants[ind_m] = (log2_e**2)*var_slope
self.slope[ind_m] = slope
self.intercept[ind_m] = intercept
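        # Illustrative conversion (assumed numbers): if the regression of
        # C_2(j) against j over j1..j2 yields a slope of -0.035 (natural-log
        # units per scale), then c2 = -0.035 * log2(e) ~= -0.0505; the
        # log-cumulants are simply the slopes re-expressed in base-2 logs.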
def __getattr__(self, name):
if name[0] == 'c' and len(name) == 2 and name[1:].isdigit():
return self.log_cumulants[self.m == int(name[1])]
if name[0] == 'C' and len(name) == 2 and name[1:].isdigit():
return self.values[self.m == int(name[1])]
if name == 'M':
return -self.c2
return self.__getattribute__(name)
def plot(self, fignum=1, nrow=3, filename=None):
"""
Plots the cumulants.
Args:
fignum(int): figure number
plt : pointer to matplotlib.pyplot
"""
nrow = min(nrow, len(self.m))
if len(self.m) > 1:
plot_dim_1 = nrow
plot_dim_2 = int(np.ceil(len(self.m) / nrow))
else:
plot_dim_1 = 1
plot_dim_2 = 1
fig, axes = plt.subplots(plot_dim_1,
plot_dim_2,
num=fignum,
squeeze=False)
fig.suptitle(self.formalism + r' - cumulants $C_m(j)$')
x = self.j
for ind_m, m in enumerate(self.m):
y = self.values[ind_m, :]
ax = axes[ind_m % nrow][ind_m // nrow]
if self.nrep == 1:
ax.plot(x, y, 'r--.')
ax.set_xlabel('j')
ax.set_ylabel('m = ' + str(m))
# ax.grid()
# plt.draw()
if len(self.log_cumulants) > 0:
# plot regression line
x0 = self.j1
x1 = self.j2
slope_log2_e = self.log_cumulants[ind_m]
slope = self.slope[ind_m]
intercept = self.intercept[ind_m]
y0 = slope*x0 + intercept
y1 = slope*x1 + intercept
legend = (r'slope [$\times \log_2(e)]$ = '
'%.5f' % (slope_log2_e))
ax.plot([x0, x1], [y0, y1], color='k',
linestyle='-', linewidth=2, label=legend)
ax.legend()
plt.draw()
else:
pass
# plot_multiscale({(i, 'cm'): self.values[m, j, nrep] for },
# {'cm': '#00000020', 'cm_avg': '#000000ff'}, ax)
for j in range(ind_m + 1, len(axes.flat)):
fig.delaxes(axes[j % nrow][j // nrow])
if filename is not None:
plt.savefig(filename)
| 32.175214
| 78
| 0.53699
|
a957f445ff1d9c3e270c79b09e653c697dac97c9
| 146
|
py
|
Python
|
lista_convidados.py
|
WestenPy/Curso_em_video
|
9f6a9775d27e1b86d54b381aba5da69b2ae21b27
|
[
"MIT"
] | null | null | null |
lista_convidados.py
|
WestenPy/Curso_em_video
|
9f6a9775d27e1b86d54b381aba5da69b2ae21b27
|
[
"MIT"
] | null | null | null |
lista_convidados.py
|
WestenPy/Curso_em_video
|
9f6a9775d27e1b86d54b381aba5da69b2ae21b27
|
[
"MIT"
] | null | null | null |
lista = [1, 2, 3, 4, 5]
lista.append(6)
print(lista)
print()
a = lista.pop(0)
b = lista.pop(3)
print(lista)
print(a, b)
del lista[1]
print(lista)
| 13.272727
| 23
| 0.636986
|
6424e394df45ee3ca29991691ccc559c1fb06ed3
| 14,821
|
py
|
Python
|
isic/utils/dataset.py
|
bomcon123456/isic
|
bf86f6a057c96c85a089587e2e1076bf5d586d3f
|
[
"Apache-2.0"
] | null | null | null |
isic/utils/dataset.py
|
bomcon123456/isic
|
bf86f6a057c96c85a089587e2e1076bf5d586d3f
|
[
"Apache-2.0"
] | 2
|
2021-09-28T05:33:52.000Z
|
2022-02-26T09:50:37.000Z
|
isic/utils/dataset.py
|
bomcon123456/isic
|
bf86f6a057c96c85a089587e2e1076bf5d586d3f
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/utils_dataset.ipynb (unless otherwise specified).
__all__ = ['SkinLabels', 'from_label_idx_to_key', 'preprocess_df', 'split_df_to_cat_num_df', 'undersampling_df',
'oversampling_df', 'oversampling_not_flat_df', 'hybridsampling_df', 'gen_new_dts', 'get_class_weights',
'AdvancedHairAugmentation', 'DrawHair', 'Microscope', 'get_default_train_transform',
'get_advanced_train_transform', 'get_default_val_transform', 'gen_dts_color_consistancy']
# Cell
import copy
import os
import random
import torch
from torch.utils.data import DataLoader, Dataset, RandomSampler
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pytorch_lightning as pl
from tqdm import tqdm
import cv2
from PIL import Image
import albumentations as A
from ..config import *
from ..sampler import ImbalancedDatasetSampler
# Cell
class SkinLabels:
lesion_type_dict = {
'nv': 'Melanocytic nevi',
'mel': 'Melanoma',
'bkl': 'Benign keratosis-like lesions',
'bcc': 'Basal cell carcinoma',
'akiec': 'Actinic keratoses',
'vasc': 'Vascular lesions',
'df': 'Dermatofibroma'
}
lesion_type_dict_inversed = {
'Melanocytic nevi': 'nv',
'Melanoma': 'mel',
'Benign keratosis-like lesions': 'bkl',
'Basal cell carcinoma': 'bcc',
'Actinic keratoses': 'akiec',
'Vascular lesions': 'vasc',
'Dermatofibroma': 'df'
}
lesion_type_vi_dict = {
'nv': 'Nốt ruồi',
'mel': 'Ung thư hắc tố',
'bkl': 'Dày sừng lành tính',
'bcc': 'Ung thư biểu mô tế bào đáy',
'akiec': 'Dày sừng quang hóa',
'vasc': 'Thương tổn mạch máu',
'df': 'U xơ da'
}
# Cell
def from_label_idx_to_key(label_idx, labels):
label_string = labels[label_idx]
key = SkinLabels.lesion_type_dict_inversed[label_string]
return key
# Cell
def preprocess_df(df, valid_size=0.2, seed=AppConfig.SEED, image_label_only=False, img_path = PathConfig.IMAGE_PATH):
df['age'].fillna((df['age'].mean()), inplace=True)
df['path'] = img_path + '/' + df['image_id'] + '.jpg'
df['label_fullstr'] = df['dx'].map(SkinLabels.lesion_type_dict.get)
label_str = pd.Categorical(df['label_fullstr'])
df['label_index'] = label_str.codes
df_undup = df.groupby('lesion_id').count()
df_undup = df_undup[df_undup['image_id'] == 1]
df_undup.reset_index(inplace=True)
_, valid = train_test_split(df_undup['lesion_id'], test_size=valid_size,
random_state=seed,
stratify=df_undup['label_index'])
valid = set(valid)
df['val'] = df['lesion_id'].apply(lambda x: 1 if str(x) in valid else 0)
df_train = df[df['val'] == 0]
df_valid = df[df['val'] == 1]
dest_df_train = df_train.reset_index(drop=True)
dest_df_valid = df_valid.reset_index(drop=True)
if not image_label_only:
return dest_df_train, dest_df_valid, list(label_str.categories)
else:
train_imgs = []
val_imgs = []
i = 0
for df in (dest_df_train, dest_df_valid):
for j, path in enumerate(df['path']):
x = np.array(Image.open(path))
y = torch.tensor(int(df['label_index'][j]))
if i == 0:
train_imgs.append((x, y))
else:
val_imgs.append((x, y))
i += 1
return train_imgs, val_imgs, list(label_str.categories)
# Cell
def split_df_to_cat_num_df(df):
text_fields = ['image_id', 'lesion_id', 'dx', 'dx_type', 'localization', 'path', 'label_fullstr', 'sex']
text_df = df.loc[:, df.columns.isin(text_fields)].copy()
numerical_df = df.drop(columns = text_fields)
image_id_cat = pd.Categorical(df['image_id'])
text_df['img_id'] = image_id_cat.codes
numerical_df['img_id']=image_id_cat.codes
y = numerical_df['label_index']
numerical_df = numerical_df.drop(columns=['label_index'])
return text_df, numerical_df, y
# Cell
def undersampling_df(df):
from imblearn.under_sampling import RandomUnderSampler
rus = RandomUnderSampler(random_state=0)
X_resampled, y_resampled = rus.fit_resample(df.drop(columns=['label_index']), df['label_index'])
X_resampled['label_index'] = y_resampled
return X_resampled
# Cell
def oversampling_df(df):
from imblearn.over_sampling import RandomOverSampler
ros = RandomOverSampler(random_state=0)
X_resampled, y_resampled = ros.fit_resample(df.drop(columns=['label_index']), df['label_index'])
X_resampled['label_index'] = y_resampled
return X_resampled
# Cell
def oversampling_not_flat_df(df, data_aug_rate=None):
if data_aug_rate is None:
data_aug_rate = [15,10,5,50,0,5,40]
for i in range(7):
if data_aug_rate[i]:
df=df.append([df.loc[df['label_index'] == i,:]]*(data_aug_rate[i]-1), ignore_index=True)
return df
# Cell
def hybridsampling_df(df, data_aug_rate=None):
if data_aug_rate is None:
data_aug_rate = [7, 7, 3, 15,-0.2, 4, 15]
for i in range(7):
if data_aug_rate[i]>0:
df=df.append([df.loc[df['label_index'] == i,:]]*(data_aug_rate[i]-1), ignore_index=True)
elif data_aug_rate[i]<0:
entries = df.loc[df['label_index'] == i,:].index.tolist()
dropIndices = np.random.choice(entries, size = int(len(entries)*abs(data_aug_rate[i])))
df = df.drop(dropIndices)
entries = df.loc[df['label_index'] == i,:].index.tolist()
return df
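# Sketch (not in the original notebook): the resampling helpers above all
# operate on a dataframe carrying a 'label_index' column, e.g. the output of
# preprocess_df. Note that undersampling_df/oversampling_df need the
# imbalanced-learn package, and the append-based helpers rely on
# DataFrame.append, which pandas 2.x has removed.
if __name__ == "__main__":
    _train_df, _, _ = preprocess_df(pd.read_csv(PathConfig.CSV_PATH))
    print(_train_df['label_index'].value_counts())
    print(oversampling_not_flat_df(_train_df)['label_index'].value_counts())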
# Cell
def gen_new_dts(data_gen_aug=None):
save_path = 'D:/Workspace/ML/HAM10000/aug/images/'
if data_gen_aug is None:
data_gen_aug = [5, 5, 3, 10, 0, 3, 10]
df = pd.read_csv(PathConfig.CSV_PATH)
train_df, valid_df, labels = preprocess_df(df, 0.2)
t1 = transforms.Compose([
AdvancedHairAugmentation(8,hairs_folder='/Work/Workspace/ML/HAM10000/data/black_hair/'),
Microscope(p=0.5),
])
t2 = transforms.Compose([
DrawHair(8),
Microscope(p=0.5),
])
ts = [t1,t1,t2]
for index, row in train_df.iterrows():
label_idx = row['label_index']
if data_gen_aug[label_idx]:
for i in range(data_gen_aug[label_idx]):
path = row['path']
image_id = row['image_id'] + '_' + str(i)
new_path = save_path + image_id + '.jpg'
# image = cv2.imread(path)
# # Generate augment image
# idx = random.randint(0,2)
# tfs = ts[idx](image)
# Add to dataframe
new_row = row.copy()
new_row["image_id"] = image_id
new_row["path"] = new_path
train_df = train_df.append(new_row)
# save
# cv2.imwrite(new_path, tfs)
# Save train_df, valid_df to csv
train_df = train_df.reset_index()
train_df = train_df.drop(columns=['index','path'])
valid_df = valid_df.drop(columns=['path'])
train_df.to_csv('/Work/Workspace/ML/HAM10000/aug/train.csv', index=False)
valid_df.to_csv('/Work/Workspace/ML/HAM10000/aug/valid.csv', index=False)
return train_df, valid_df
# Cell
def get_class_weights(target):
class_sample_count = np.unique(target, return_counts=True)[1]
print(class_sample_count)
weight = 1. / class_sample_count
samples_weight = weight[target]
samples_weight = torch.from_numpy(samples_weight)
return weight, samples_weight
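# Sketch (not in the original notebook): the per-sample weights returned by
# get_class_weights line up with the target vector, so they can drive a
# PyTorch WeightedRandomSampler for class-balanced batches.
if __name__ == "__main__":
    from torch.utils.data import WeightedRandomSampler
    _train_df, _, _ = preprocess_df(pd.read_csv(PathConfig.CSV_PATH))
    _class_w, _sample_w = get_class_weights(_train_df['label_index'].values)
    _sampler = WeightedRandomSampler(_sample_w, num_samples=len(_sample_w))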
# Cell
class AdvancedHairAugmentation:
"""
    Superimpose images of hairs onto the target image
    Args:
        hairs (int): maximum number of hairs to superimpose
        hairs_folder (str): path to the folder with hair images
"""
def __init__(self, hairs: int = 5, hairs_folder: str = ""):
self.hairs = hairs
self.hairs_folder = hairs_folder
def __call__(self, img):
"""
Args:
img (PIL Image): Image to draw hairs on.
Returns:
PIL Image: Image with drawn hairs.
"""
n_hairs = random.randint(0, self.hairs)
if not n_hairs:
return img
height, width, _ = img.shape # target image width and height
hair_images = [im for im in os.listdir(self.hairs_folder) if 'png' in im]
for _ in range(n_hairs):
hair = cv2.imread(os.path.join(self.hairs_folder, random.choice(hair_images)))
hair = cv2.flip(hair, random.choice([-1, 0, 1]))
hair = cv2.rotate(hair, random.choice([0, 1, 2]))
h_height, h_width, _ = hair.shape # hair image width and height
if img.shape[0] < hair.shape[0] or img.shape[1] < hair.shape[1]:
hair = cv2.resize(hair, (int(width*0.8), int(height*0.8)))
h_height, h_width, _ = hair.shape # hair image width and height
roi_ho = random.randint(0, img.shape[0] - hair.shape[0])
roi_wo = random.randint(0, img.shape[1] - hair.shape[1])
roi = img[roi_ho:roi_ho + h_height, roi_wo:roi_wo + h_width]
# Creating a mask and inverse mask
img2gray = cv2.cvtColor(hair, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
# Now black-out the area of hair in ROI
img_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
# Take only region of hair from hair image.
hair_fg = cv2.bitwise_and(hair, hair, mask=mask)
# Put hair in ROI and modify the target image
dst = cv2.add(img_bg, hair_fg)
img[roi_ho:roi_ho + h_height, roi_wo:roi_wo + h_width] = dst
return img
def __repr__(self):
return f'{self.__class__.__name__}(hairs={self.hairs}, hairs_folder="{self.hairs_folder}")'
# Cell
class DrawHair:
"""
Draw a random number of pseudo hairs
Args:
hairs (int): maximum number of hairs to draw
width (tuple): possible width of the hair in pixels
"""
def __init__(self, hairs:int = 4, width:tuple = (1, 2)):
self.hairs = hairs
self.width = width
def __call__(self, img):
"""
Args:
img (PIL Image): Image to draw hairs on.
Returns:
PIL Image: Image with drawn hairs.
"""
if not self.hairs:
return img
        height, width, _ = img.shape  # numpy shape order is (rows, cols, channels)
for _ in range(random.randint(0, self.hairs)):
# The origin point of the line will always be at the top half of the image
origin = (random.randint(0, width), random.randint(0, height // 2))
# The end of the line
end = (random.randint(0, width), random.randint(0, height))
color = (0, 0, 0) # color of the hair. Black.
cv2.line(img, origin, end, color, random.randint(self.width[0], self.width[1]))
return img
def __repr__(self):
return f'{self.__class__.__name__}(hairs={self.hairs}, width={self.width})'
# Cell
class Microscope:
"""
    Cut out the edges around the center circle of the image,
    imitating a picture taken through a microscope
Args:
p (float): probability of applying an augmentation
"""
def __init__(self, p: float = 0.5):
self.p = p
def __call__(self, img):
"""
Args:
img (PIL Image): Image to apply transformation to.
Returns:
PIL Image: Image with transformation.
"""
if random.random() < self.p:
circle = cv2.circle((np.ones(img.shape) * 255).astype(np.uint8), # image placeholder
(img.shape[1]//2, img.shape[0]//2), # center point of circle
random.randint(img.shape[1]//2 - 3, img.shape[1]//2 + 15), # radius
(0, 0, 0), # color
-1)
mask = circle - 255
img = np.multiply(img, mask)
return img
def __repr__(self):
return f'{self.__class__.__name__}(p={self.p})'
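# Sketch (hypothetical file names): the three augmentations above all work on
# raw numpy arrays (H, W, C), so they can be chained directly or wrapped in a
# torchvision transforms.Compose, as gen_new_dts does earlier in this module.
if __name__ == "__main__":
    _img = cv2.imread("example_lesion.jpg")  # hypothetical input path
    if _img is not None:
        _img = DrawHair(hairs=4)(_img)
        _img = Microscope(p=1.0)(_img)
        cv2.imwrite("example_lesion_aug.jpg", _img)  # hypothetical output path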
# Cell
def get_default_train_transform(image_size=224, no_norm=False):
transforms_train = [
A.Transpose(p=0.5),
A.VerticalFlip(p=0.5),
A.HorizontalFlip(p=0.5),
A.Resize(400, 400),
A.RandomResizedCrop(image_size, image_size)
]
norm = A.Normalize()
if no_norm:
norm = A.Normalize(mean=0, std=1)
transforms_train.append(norm)
return A.Compose(transforms_train)
def get_advanced_train_transform(image_size=224, cut_out=True, no_norm=False):
transforms_train = [
A.Transpose(p=0.5),
A.VerticalFlip(p=0.5),
A.HorizontalFlip(p=0.5),
A.RandomBrightness(limit=0.2, p=0.75),
A.RandomContrast(limit=0.2, p=0.75),
A.OneOf([
A.MotionBlur(blur_limit=5),
A.MedianBlur(blur_limit=5),
A.GaussianBlur(blur_limit=5),
A.GaussNoise(var_limit=(5.0, 30.0)),
], p=0.7),
A.OneOf([
A.OpticalDistortion(distort_limit=1.0),
A.GridDistortion(num_steps=5, distort_limit=1.),
A.ElasticTransform(alpha=3),
], p=0.7),
A.CLAHE(clip_limit=4.0, p=0.7),
A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0, p=0.85),
A.Resize(image_size, image_size)
]
if cut_out:
transforms_train.append(A.Cutout(max_h_size=int(image_size * 0.375), max_w_size=int(image_size * 0.375), num_holes=1, p=0.7))
norm = A.Normalize()
if no_norm:
norm = A.Normalize(mean=0, std=1)
transforms_train.append(norm)
return A.Compose(transforms_train)
def get_default_val_transform(image_size=224):
return A.Compose([
A.Resize(image_size, image_size),
A.Normalize()
])
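# Sketch (hypothetical file name): albumentations pipelines are called with
# keyword arguments and return a dict, so the augmented array lives under the
# 'image' key.
if __name__ == "__main__":
    _aug = get_advanced_train_transform(image_size=224)
    _img = cv2.imread("example_lesion.jpg")  # hypothetical input path
    if _img is not None:
        _img = cv2.cvtColor(_img, cv2.COLOR_BGR2RGB)
        _out = _aug(image=_img)["image"]
        print(_out.shape, _out.dtype)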
# Cell
def gen_dts_color_consistancy(img_paths, output_folder_path, resize=None):
if not os.path.isdir(output_folder_path):
os.mkdir(output_folder_path)
with tqdm(total=len(img_paths), ascii=True, ncols=100) as t:
for img_path in img_paths:
            img_name = os.path.basename(img_path)
img_ = cv2.imread(img_path, cv2.IMREAD_COLOR)
if resize is not None:
                img_ = cv2.resize(img_, resize, interpolation=cv2.INTER_AREA)
np_img = color_constancy(img_)
            file_name = os.path.join(output_folder_path, img_name.split('.')[0] + '.jpg')
cv2.imwrite(file_name, np_img)
t.update()
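# Usage sketch (hypothetical output folder): batch color-constancy
# preprocessing over the HAM10000 image folder configured in PathConfig.
if __name__ == "__main__":
    import glob
    _paths = glob.glob(os.path.join(PathConfig.IMAGE_PATH, "*.jpg"))
    gen_dts_color_consistancy(_paths, "color_constancy_out", resize=(450, 450))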
| 34.30787
| 133
| 0.611362
|
b07d73c57532e63ecee6c7b10c3a74eea72c46da
| 872
|
py
|
Python
|
src/learning/NaiveBayes.py
|
alexandrabenamar/Who-Wins
|
23df54f98286e67aab39e92ac746bccf6916c231
|
[
"MIT"
] | 3
|
2018-04-10T21:52:57.000Z
|
2018-08-22T15:41:58.000Z
|
src/learning/NaiveBayes.py
|
alexandrabenamar/Who-Wins
|
23df54f98286e67aab39e92ac746bccf6916c231
|
[
"MIT"
] | null | null | null |
src/learning/NaiveBayes.py
|
alexandrabenamar/Who-Wins
|
23df54f98286e67aab39e92ac746bccf6916c231
|
[
"MIT"
] | 1
|
2020-05-18T15:46:23.000Z
|
2020-05-18T15:46:23.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pyspark.mllib.classification import NaiveBayes
from functions_MLlib import spark_context, training_set, test_set, write_result, brexit_labeled_data, mode_predict
if __name__ == "__main__" :
sc = spark_context()
numFeatures = 10000
print("Training...\n")
(training, idf) = training_set(sc, numFeatures = numFeatures)
model = NaiveBayes.train(training)
print("Test... \n")
test = test_set(sc, numFeatures = numFeatures, idf = idf)
(num_pos, num_neg) = mode_predict(model, test)
print("Test on Brexit labeled data...\n")
(accuracy, f1) = brexit_labeled_data(sc, model = model, numFeatures = numFeatures, idf = idf)
print("Saving results...")
write_result(num_pos, num_neg, accuracy = accuracy, f1 = f1, name = "Naïve Bayes")
| 27.25
| 114
| 0.658257
|
08ec6d6ad7cd41ddb71590624af6d18098ce3bbe
| 49,161
|
py
|
Python
|
integration_tests/ci_analyzer/test_analyzer_aggregation_queries.py
|
nordic-institute/X-Road-Metrics
|
249d859466bf6065257cf8b3c27d0e9db4ab2378
|
[
"MIT"
] | 2
|
2021-06-30T11:12:31.000Z
|
2021-09-24T08:50:03.000Z
|
integration_tests/ci_analyzer/test_analyzer_aggregation_queries.py
|
nordic-institute/X-Road-Metrics
|
249d859466bf6065257cf8b3c27d0e9db4ab2378
|
[
"MIT"
] | null | null | null |
integration_tests/ci_analyzer/test_analyzer_aggregation_queries.py
|
nordic-institute/X-Road-Metrics
|
249d859466bf6065257cf8b3c27d0e9db4ab2378
|
[
"MIT"
] | 2
|
2021-07-02T12:31:37.000Z
|
2021-11-09T08:44:09.000Z
|
import unittest
from unittest.mock import Mock
from integration_tests.helpers import cl_db_handler
from integration_tests.ci_analyzer.ci_analyzer_settings import Settings
from analysis_module.opmon_analyzer.AnalyzerDatabaseManager import AnalyzerDatabaseManager
from datetime import datetime
import time
class TestAnalyzerAggregationQueriesCI(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# load the test database settings
settings = Settings()
# initialize a helper for operations the test database
self.mongodb_h = cl_db_handler.MongoDBHandler(settings.MONGODB_USER, settings.MONGODB_PWD, settings.MONGODB_SERVER)
# db conf
db_config = Mock()
db_config.MDB_USER = settings.MONGODB_USER
db_config.MDB_PWD = settings.MONGODB_PWD
db_config.MDB_SERVER = settings.MONGODB_SERVER
db_config.MONGODB_URI = "mongodb://{0}:{1}@{2}/auth_db".format(settings.MONGODB_USER, settings.MONGODB_PWD, settings.MONGODB_SERVER)
db_config.MONGODB_QD = "CI_query_db"
db_config.MONGODB_AD = "CI_analyzer_database"
self._db_config = db_config
# analyzer conf
config = Mock()
config.timestamp_field = "timestamp"
config.service_call_fields = ["service_call"]
config.failed_request_ratio_threshold = 0.7
config.historic_averages_thresholds = {'request_count': 0.95}
config.relevant_cols_nested = ["service_call", "succeeded", "messageId", "timestamp"]
config.relevant_cols_general_alternative = [('requestSize', 'clientRequestSize', 'producerRequestSize'),
('responseSize', 'clientResponseSize', 'producerResponseSize')]
config.relevant_cols_general = ["_id", 'totalDuration', 'producerDurationProducerView', 'requestNwDuration',
'responseNwDuration', 'correctorStatus']
self._config = config
# set up the Analyzer database manager to be tested
self.db_manager = AnalyzerDatabaseManager(db_config, config)
def test_aggregate_data_empty_database(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
clean_documents = []
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data("failed_request_ratio", agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(0, len(data))
data = self.db_manager.aggregate_data("duplicate_message_ids", agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(0, len(data))
data = self.db_manager.aggregate_data(model_type="time_sync_errors", metric="responseNwDuration", threshold=0,
agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(0, len(data))
data = self.db_manager.aggregate_data_for_historic_averages_model(agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(0, len(data))
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_failed_request_ratio_model_single_query(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_documents = [clean_doc]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data("failed_request_ratio", agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(1, len(data))
self.assertEqual(1, data[data.succeeded].iloc[0]["count"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_failed_request_ratio_model_single_query_client_missing(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": None, "producer": client}
clean_doc["producerRequestSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_documents = [clean_doc]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data("failed_request_ratio", agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(1, len(data))
self.assertEqual(1, data[data.succeeded].iloc[0]["count"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_failed_request_ratio_model_single_query_producer_missing(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": None}
clean_doc["clientRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_documents = [clean_doc]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data("failed_request_ratio", agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(1, len(data))
self.assertEqual(1, data[data.succeeded].iloc[0]["count"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_failed_request_ratio_model_two_queries_in_same_period_both_true(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_doc2 = clean_doc.copy()
clean_doc2["_id"] = "id2"
client2 = client.copy()
client2["timestamp"] = datetime.strptime("5/10/2017 10:30:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000
clean_doc2["client"] = client2
clean_doc2["producer"] = client2
clean_documents = [clean_doc, clean_doc2]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data("failed_request_ratio", agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(1, len(data))
self.assertEqual(2, data[data.succeeded].iloc[0]["count"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_failed_request_ratio_model_two_queries_in_same_period_different_values(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_doc2 = clean_doc.copy()
clean_doc2["_id"] = "id2"
client2 = client.copy()
client2["succeeded"] = False
client2["timestamp"] = datetime.strptime("5/10/2017 10:30:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000
clean_doc2["client"] = client2
clean_doc2["producer"] = client2
clean_documents = [clean_doc, clean_doc2]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data("failed_request_ratio", agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(2, len(data))
self.assertEqual(1, data[data.succeeded].iloc[0]["count"])
self.assertEqual(1, data[~data.succeeded].iloc[0]["count"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_failed_request_ratio_model_two_queries_in_different_periods_same_values(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_doc2 = clean_doc.copy()
clean_doc2["_id"] = "id2"
client2 = client.copy()
client2["timestamp"] = datetime.strptime("5/11/2017 10:30:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000
clean_doc2["client"] = client2
clean_doc2["producer"] = client2
clean_documents = [clean_doc, clean_doc2]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data("failed_request_ratio", agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(2, len(data))
self.assertEqual(2, sum(data.succeeded))
self.assertEqual(1, data[data.succeeded].iloc[0]["count"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_failed_request_ratio_model_two_queries_limited_start_time(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_doc2 = clean_doc.copy()
clean_doc2["_id"] = "id2"
client2 = client.copy()
client2["timestamp"] = datetime.strptime("9/11/2017 10:30:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000
clean_doc2["client"] = client2
clean_doc2["producer"] = client2
clean_documents = [clean_doc, clean_doc2]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data("failed_request_ratio", agg_minutes=60,
start_time=datetime.strptime("7/11/2017 10:30:00",
"%d/%m/%Y %H:%M:%S").timestamp() * 1000,
end_time=None, ids_to_exclude=[])
print(data)
self.assertEqual(1, len(data))
self.assertEqual(1, sum(data.succeeded))
self.assertEqual(1, data[data.succeeded].iloc[0]["count"])
self.assertEqual(["id2"], data[data.succeeded].iloc[0]["request_ids"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_failed_request_ratio_model_two_queries_limited_end_time(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_doc2 = clean_doc.copy()
clean_doc2["_id"] = "id2"
client2 = client.copy()
client2["timestamp"] = datetime.strptime("9/11/2017 10:30:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000
clean_doc2["client"] = client2
clean_doc2["producer"] = client2
clean_documents = [clean_doc, clean_doc2]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data("failed_request_ratio", agg_minutes=60,
start_time=None,
end_time=datetime.strptime("7/11/2017 10:30:00",
"%d/%m/%Y %H:%M:%S").timestamp() * 1000,
ids_to_exclude=[])
print(data)
self.assertEqual(1, len(data))
self.assertEqual(1, sum(data.succeeded))
self.assertEqual(1, data[data.succeeded].iloc[0]["count"])
self.assertEqual(["id1"], data[data.succeeded].iloc[0]["request_ids"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_failed_request_ratio_model_two_queries_excluded_ids(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_doc2 = clean_doc.copy()
clean_doc2["_id"] = "id2"
client2 = client.copy()
client2["timestamp"] = datetime.strptime("9/11/2017 10:30:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000
clean_doc2["client"] = client2
clean_doc2["producer"] = client2
clean_documents = [clean_doc, clean_doc2]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data("failed_request_ratio", agg_minutes=60,
start_time=None,
end_time=None,
ids_to_exclude=["id2"])
print(data)
self.assertEqual(1, len(data))
self.assertEqual(1, sum(data.succeeded))
self.assertEqual(1, data[data.succeeded].iloc[0]["count"])
self.assertEqual(["id1"], data[data.succeeded].iloc[0]["request_ids"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_duplicate_message_id_model_single_query(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
        clean_documents = [clean_doc]
        self.mongodb_h.add_clean_documents(clean_documents)
        # Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data("duplicate_message_ids", agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(0, len(data))
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_duplicate_message_id_model_two_queries_same_period_different_ids(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_doc2 = clean_doc.copy()
clean_doc2["_id"] = "id2"
client2 = client.copy()
client2["timestamp"] = datetime.strptime("5/10/2017 10:30:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000
client2["messageId"] = "mID2"
clean_doc2["client"] = client2
clean_doc2["producer"] = client2
clean_documents = [clean_doc, clean_doc2]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data("duplicate_message_ids", agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(0, len(data))
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_duplicate_message_id_model_two_queries_same_period_same_ids(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_doc2 = clean_doc.copy()
clean_doc2["_id"] = "id2"
client2 = client.copy()
client2["timestamp"] = datetime.strptime("5/10/2017 10:30:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000
clean_doc2["client"] = client2
clean_doc2["producer"] = client2
clean_documents = [clean_doc, clean_doc2]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data("duplicate_message_ids", agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(1, len(data))
self.assertEqual(2, data.iloc[0]["message_id_count"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_duplicate_message_id_model_two_queries_different_periods_same_ids(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_doc2 = clean_doc.copy()
clean_doc2["_id"] = "id2"
client2 = client.copy()
client2["timestamp"] = datetime.strptime("5/12/2017 10:30:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000
clean_doc2["client"] = client2
clean_doc2["producer"] = client2
clean_documents = [clean_doc, clean_doc2]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data("duplicate_message_ids", agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(0, len(data))
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_time_sync_model_no_anomaly_found(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_documents = [clean_doc]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data(model_type="time_sync_errors", metric="responseNwDuration", threshold=0,
agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(0, len(data))
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_time_sync_model_anomaly_found(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = -100
clean_documents = [clean_doc]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data(model_type="time_sync_errors", metric="responseNwDuration", threshold=0,
agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(1, len(data))
self.assertEqual(1, data.iloc[0]["erroneous_count"])
self.assertEqual(-100, data.iloc[0]["avg_erroneous_diff"])
self.assertEqual(1, data.iloc[0]["request_count"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_time_sync_model_two_queries_in_same_period_one_anomalous(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_doc2 = clean_doc.copy()
clean_doc2["_id"] = "id2"
client2 = client.copy()
client2["timestamp"] = datetime.strptime("5/10/2017 10:30:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000
clean_doc2["client"] = client2
clean_doc2["producer"] = client2
clean_doc2["responseNwDuration"] = -100
clean_documents = [clean_doc, clean_doc2]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data(model_type="time_sync_errors", metric="responseNwDuration", threshold=0,
agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(1, len(data))
self.assertEqual(1, data.iloc[0]["erroneous_count"])
self.assertEqual(-100, data.iloc[0]["avg_erroneous_diff"])
self.assertEqual(2, data.iloc[0]["request_count"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_time_sync_model_two_queries_in_same_period_both_anomalous(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = -200
clean_doc2 = clean_doc.copy()
clean_doc2["_id"] = "id2"
client2 = client.copy()
client2["timestamp"] = datetime.strptime("5/10/2017 10:30:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000
clean_doc2["client"] = client2
clean_doc2["producer"] = client2
clean_doc2["responseNwDuration"] = -100
clean_documents = [clean_doc, clean_doc2]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data(model_type="time_sync_errors", metric="responseNwDuration", threshold=0,
agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(1, len(data))
self.assertEqual(2, data.iloc[0]["erroneous_count"])
self.assertEqual(-150, data.iloc[0]["avg_erroneous_diff"])
self.assertEqual(2, data.iloc[0]["request_count"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_historic_averages_model_single_query(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_documents = [clean_doc]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data_for_historic_averages_model(agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(1, len(data))
self.assertEqual(1, data.iloc[0]["request_count"])
self.assertEqual(1000, data.iloc[0]["mean_request_size"])
self.assertEqual(1000, data.iloc[0]["mean_response_size"])
self.assertEqual(100, data.iloc[0]["mean_client_duration"])
self.assertEqual(100, data.iloc[0]["mean_producer_duration"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_historic_averages_model_single_query_client_missing(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": None, "producer": client}
clean_doc["producerRequestSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_documents = [clean_doc]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data_for_historic_averages_model(agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
print(data)
self.assertEqual(1, len(data))
self.assertEqual(1, data.iloc[0]["request_count"])
self.assertEqual(1000, data.iloc[0]["mean_request_size"])
self.assertEqual(1000, data.iloc[0]["mean_response_size"])
self.assertEqual(None, data.iloc[0]["mean_client_duration"])
self.assertEqual(100, data.iloc[0]["mean_producer_duration"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_historic_averages_model_single_query_producer_missing(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": None}
clean_doc["clientRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_documents = [clean_doc]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data_for_historic_averages_model(agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(1, len(data))
self.assertEqual(1, data.iloc[0]["request_count"])
self.assertEqual(1000, data.iloc[0]["mean_request_size"])
self.assertEqual(1000, data.iloc[0]["mean_response_size"])
self.assertEqual(100, data.iloc[0]["mean_client_duration"])
self.assertEqual(None, data.iloc[0]["mean_producer_duration"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_historic_averages_model_single_query_client_producer_different(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 500
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 500
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_documents = [clean_doc]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data_for_historic_averages_model(agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(1, len(data))
self.assertEqual(1, data.iloc[0]["request_count"])
self.assertEqual(1000, data.iloc[0]["mean_request_size"])
self.assertEqual(1000, data.iloc[0]["mean_response_size"])
self.assertEqual(100, data.iloc[0]["mean_client_duration"])
self.assertEqual(100, data.iloc[0]["mean_producer_duration"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_historic_averages_model_two_queries_in_same_period(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_doc2 = clean_doc.copy()
clean_doc2["_id"] = "id2"
client2 = client.copy()
client2["timestamp"] = datetime.strptime("5/10/2017 10:30:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000
clean_doc2["client"] = client2
clean_doc2["producer"] = client2
clean_documents = [clean_doc, clean_doc2]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data_for_historic_averages_model(agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(1, len(data))
self.assertEqual(2, data.iloc[0]["request_count"])
self.assertEqual(1000, data.iloc[0]["mean_request_size"])
self.assertEqual(1000, data.iloc[0]["mean_response_size"])
self.assertEqual(100, data.iloc[0]["mean_client_duration"])
self.assertEqual(100, data.iloc[0]["mean_producer_duration"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_historic_averages_model_two_queries_in_same_period_one_not_successful(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_doc2 = clean_doc.copy()
clean_doc2["_id"] = "id2"
client2 = client.copy()
client2["timestamp"] = datetime.strptime("5/10/2017 10:30:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000
client2["succeeded"] = False
clean_doc2["client"] = client2
clean_doc2["producer"] = client2
clean_documents = [clean_doc, clean_doc2]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data_for_historic_averages_model(agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(1, len(data))
self.assertEqual(1, data.iloc[0]["request_count"])
self.assertEqual(1000, data.iloc[0]["mean_request_size"])
self.assertEqual(1000, data.iloc[0]["mean_response_size"])
self.assertEqual(100, data.iloc[0]["mean_client_duration"])
self.assertEqual(100, data.iloc[0]["mean_producer_duration"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_historic_averages_model_two_queries_in_same_period_different_service_calls(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_doc2 = clean_doc.copy()
clean_doc2["_id"] = "id2"
client2 = client.copy()
client2["timestamp"] = datetime.strptime("5/10/2017 10:30:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000
client2["service_call"] = "sc2"
clean_doc2["client"] = client2
clean_doc2["producer"] = client2
clean_doc2["clientRequestSize"] = 500
clean_doc2["clientResponseSize"] = 500
clean_doc2["totalDuration"] = 50
clean_doc2["producerDurationProducerView"] = 50
clean_documents = [clean_doc, clean_doc2]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data_for_historic_averages_model(agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
self.assertEqual(2, len(data))
self.assertEqual(1, data[data.service_call == "sc1"].iloc[0]["request_count"])
self.assertEqual(1, data[data.service_call == "sc2"].iloc[0]["request_count"])
self.assertEqual(1000, data[data.service_call == "sc1"].iloc[0]["mean_request_size"])
self.assertEqual(500, data[data.service_call == "sc2"].iloc[0]["mean_request_size"])
self.assertEqual(1000, data[data.service_call == "sc1"].iloc[0]["mean_response_size"])
self.assertEqual(500, data[data.service_call == "sc2"].iloc[0]["mean_response_size"])
self.assertEqual(100, data[data.service_call == "sc1"].iloc[0]["mean_client_duration"])
self.assertEqual(50, data[data.service_call == "sc2"].iloc[0]["mean_client_duration"])
self.assertEqual(100, data[data.service_call == "sc1"].iloc[0]["mean_producer_duration"])
self.assertEqual(50, data[data.service_call == "sc2"].iloc[0]["mean_producer_duration"])
# Clean before exit
self.mongodb_h.remove_all()
def test_aggregate_data_for_historic_averages_model_two_queries_different_periods_same_service_call(self):
# Clean database state
self.mongodb_h.remove_all()
self.mongodb_h.create_indexes()
# Add clean data
client = {"timestamp": datetime.strptime("5/10/2017 10:00:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000,
"service_call": "sc1",
"succeeded": True,
"messageId": "mID"}
clean_doc = {"client": client, "producer": client}
clean_doc["clientRequestSize"] = 1000
clean_doc["producerRequestSize"] = 1000
clean_doc["clientResponseSize"] = 1000
clean_doc["producerResponseSize"] = 1000
clean_doc["_id"] = "id1"
clean_doc["correctorStatus"] = "done"
clean_doc["totalDuration"] = 100
clean_doc["producerDurationProducerView"] = 100
clean_doc["requestNwDuration"] = 100
clean_doc["responseNwDuration"] = 100
clean_doc2 = clean_doc.copy()
clean_doc2["_id"] = "id2"
client2 = client.copy()
client2["timestamp"] = datetime.strptime("5/12/2017 10:30:00", "%d/%m/%Y %H:%M:%S").timestamp() * 1000
clean_doc2["client"] = client2
clean_doc2["producer"] = client2
clean_doc2["clientRequestSize"] = 500
clean_doc2["clientResponseSize"] = 500
clean_doc2["totalDuration"] = 50
clean_doc2["producerDurationProducerView"] = 50
clean_documents = [clean_doc, clean_doc2]
self.mongodb_h.add_clean_documents(clean_documents)
# Run AnalyzerDatabaseManager
data = self.db_manager.aggregate_data_for_historic_averages_model(agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[])
print(data)
self.assertEqual(2, len(data))
self.assertEqual(1, data[data.timestamp.dt.month == 10].iloc[0]["request_count"])
self.assertEqual(1, data[data.timestamp.dt.month == 12].iloc[0]["request_count"])
self.assertEqual(1000, data[data.timestamp.dt.month == 10].iloc[0]["mean_request_size"])
self.assertEqual(500, data[data.timestamp.dt.month == 12].iloc[0]["mean_request_size"])
self.assertEqual(1000, data[data.timestamp.dt.month == 10].iloc[0]["mean_response_size"])
self.assertEqual(500, data[data.timestamp.dt.month == 12].iloc[0]["mean_response_size"])
self.assertEqual(100, data[data.timestamp.dt.month == 10].iloc[0]["mean_client_duration"])
self.assertEqual(50, data[data.timestamp.dt.month == 12].iloc[0]["mean_client_duration"])
self.assertEqual(100, data[data.timestamp.dt.month == 10].iloc[0]["mean_producer_duration"])
self.assertEqual(50, data[data.timestamp.dt.month == 12].iloc[0]["mean_producer_duration"])
# Clean before exit
self.mongodb_h.remove_all()
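# Not part of the original module: a standard unittest entry point so this
# integration-test file can also be executed directly.
if __name__ == '__main__':
    unittest.main()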
| 43.737544
| 140
| 0.623462
|
ef1790e1d254d801ba8736f136a67699b8eca72d
| 4,533
|
py
|
Python
|
apis/bilibili/video.py
|
LXG-Shadow/BilibiliGetFavorite
|
a3912eb983be5f420c3729d705eefbf06d240309
|
[
"Apache-2.0"
] | 60
|
2018-08-27T07:10:58.000Z
|
2021-07-14T11:13:53.000Z
|
apis/bilibili/video.py
|
LXG-Shadow/BilibiliGetFavorite
|
a3912eb983be5f420c3729d705eefbf06d240309
|
[
"Apache-2.0"
] | 6
|
2019-09-09T02:50:23.000Z
|
2021-06-08T21:46:16.000Z
|
apis/bilibili/video.py
|
LXG-Shadow/BilibiliGetFavorite
|
a3912eb983be5f420c3729d705eefbf06d240309
|
[
"Apache-2.0"
] | 17
|
2019-01-20T08:46:01.000Z
|
2021-06-30T10:44:01.000Z
|
from apis import CommonRequestWrapper
from config import Config
class QUALITY:
P4k = (120, "FLV", "超清 4K", True)
P1080F60 = (116, "FLV", "高清 1080P60", True)
P1080PLUS = (112, "FLV", "高清 1080P+", True)
P1080 = (80, "FLV", "高清 1080P", True)
    P720F60 = (74, "FLV", "高清 720P60", True)
P720 = (64, "FLV", "高清 720P", True)
P720MP4 = (48, "MP4", "高清 720P (MP4)", True)
P480 = (32, "FLV", "清晰 480P", False)
P360 = (16, "FLV", "流畅 360P", False)
values = [P4k, P1080F60, P1080PLUS, P1080,
P720F60, P720, P720MP4,
P480, P360]
@staticmethod
def id(quality):
return quality[0]
@staticmethod
def format(quality):
return quality[1]
@staticmethod
def description(quality):
return quality[2]
@staticmethod
def cookie(quality):
return quality[3]
class API:
cookies = Config.getCookie("bilibili")
@staticmethod
def headers():
h = Config.commonHeaders.copy()
h.update({"referer": "https://www.bilibili.com"})
return h
@staticmethod
def detail_api(bvid):
return "https://api.bilibili.com/x/web-interface/view/detail?" \
"bvid={bvid}&aid=&jsonp=jsonp".format(bvid=bvid)
@staticmethod
def pages_api(bvid):
return "https://api.bilibili.com/x/player/pagelist?bvid={bvid}".format(bvid=bvid)
@staticmethod
def playurl_api(bvid, cid, quality):
return "https://api.bilibili.com/x/player/playurl?type=&otype=json&fourk=1&" \
"avid=&bvid={bvid}&cid={cid}&qn={quality}".format(bvid=bvid,
cid=cid,
quality=quality)
@staticmethod
def bangumi_url(ep_id):
return "https://www.bilibili.com/bangumi/play/{ep_id}".format(ep_id=ep_id)
@staticmethod
def bangumi_playurl_api(bvid, cid, quality):
return "https://api.bilibili.com/pgc/player/web/playurl?type=&otype=json&fourk=1&" \
"avid=&bvid={bvid}&cid={cid}&qn={quality}".format(bvid=bvid,
cid=cid,
quality=quality)
@staticmethod
def danmu_api(cid):
return "http://comment.bilibili.com/{cid}.xml".format(cid=cid)
@CommonRequestWrapper
def getVideoInfo(bvid: str):
"""
    return video info
:param bvid: bilibili bvid
:return: bytes
"""
return ("get",
API.detail_api(bvid),
{"headers": API.headers()}
)
@CommonRequestWrapper
def getVideoCid(bvid: str):
"""
return video pages including page name and cid
:param bvid: bilibili bvid
:return: bytes
"""
return ("get",
API.pages_api(bvid),
{"headers": API.headers(),
"cookies": API.cookies}
)
@CommonRequestWrapper
def getPlayUrl(bvid: str, cid: str, quality: int = QUALITY.id(QUALITY.P1080F60)):
"""
return video real url by bvid and cid
:param bvid: bilibili bvid
:return: bytes
"""
return ("get",
API.playurl_api(bvid, cid, quality),
{"headers": API.headers(),
"cookies": API.cookies}
)
@CommonRequestWrapper
def getBangumiInfo(ep_id:str):
"""
    get bangumi info by ep id or ss id (epxxx / ssxxxx)
:param ep_id: bilibili ep_id
:return: bytes
"""
return ("get",
API.bangumi_url(ep_id),
{"headers": API.headers(),
"cookies": API.cookies}
)
@CommonRequestWrapper
def getBangumiPlayUrl(bvid: str, cid: str, quality: int = QUALITY.id(QUALITY.P1080F60)):
"""
return video real url by bvid and cid
:param bvid: bilibili bvid
:return: bytes
"""
return ("get",
API.bangumi_playurl_api(bvid, cid, quality),
{"headers": API.headers(),
"cookies": API.cookies}
)
# videoUrl = "https://www.bilibili.com/video/av%s"
# baseUrl = "https://www.bilibili.com/video/%s"
# pagesApi = "https://www.bilibili.com/widget/getPageList?aid=%s"
# pagesApi = "https://api.bilibili.com/x/player/pagelist?bvid=%s"
# detailApi = "https://api.bilibili.com/x/web-interface/view/detail?bvid=%s&aid=&jsonp=jsonp"
# playurlApi = "https://api.bilibili.com/x/player/playurl?avid=&bvid=%s&cid=%s&qn=%s&type=&otype=json&fourk=1"
# dmApi = "http://comment.bilibili.com/%s.xml"
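# Usage sketch (hypothetical bvid; assumes CommonRequestWrapper performs the
# request and hands back the raw response bytes, as the docstrings above say):
if __name__ == "__main__":
    _info = getVideoInfo("BV1xx411c7XX")  # hypothetical bvid
    _pages = getVideoCid("BV1xx411c7XX")
    print(_info[:200])
    print(_pages[:200])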
| 28.15528
| 110
| 0.566953
|
f40355cfb7ec8c3b4e34be4e3611baa03daeb4a4
| 304
|
py
|
Python
|
tests/test_invoice_line_item.py
|
Files-com/files-sdk-python
|
84cedc9be099cd9e4db6249ef7a9d60595487090
|
[
"MIT"
] | 14
|
2020-08-05T15:48:06.000Z
|
2021-08-18T13:13:39.000Z
|
tests/test_invoice_line_item.py
|
Files-com/files-sdk-python
|
84cedc9be099cd9e4db6249ef7a9d60595487090
|
[
"MIT"
] | 4
|
2020-10-30T14:49:25.000Z
|
2021-09-29T17:11:53.000Z
|
tests/test_invoice_line_item.py
|
Files-com/files-sdk-python
|
84cedc9be099cd9e4db6249ef7a9d60595487090
|
[
"MIT"
] | null | null | null |
import unittest
import inspect
import files_sdk
from tests.base import TestBase
from files_sdk.models import InvoiceLineItem
from files_sdk import invoice_line_item
class InvoiceLineItemTest(TestBase):
pass
# Instance Methods
# Static Methods
if __name__ == '__main__':
unittest.main()
| 21.714286
| 44
| 0.786184
|
52c895bee1aa9a9cbf371e66a93b0a5bced7f980
| 1,590
|
py
|
Python
|
client/back/server_connection.py
|
joaorura/HangmanGameOnline-HGC
|
9d32ba7bd5f95cd1d899afe3eb0f327faff36596
|
[
"MIT"
] | null | null | null |
client/back/server_connection.py
|
joaorura/HangmanGameOnline-HGC
|
9d32ba7bd5f95cd1d899afe3eb0f327faff36596
|
[
"MIT"
] | null | null | null |
client/back/server_connection.py
|
joaorura/HangmanGameOnline-HGC
|
9d32ba7bd5f95cd1d899afe3eb0f327faff36596
|
[
"MIT"
] | null | null | null |
from socket import socket
from multiprocessing import Queue
from utils.utils import check_type
from .process_all import ProcessAll
from .process_receive import ProcessReceive
from .process_send import ProcessSend
class ServerConnection:
def __init__(self, ip, port):
self.address = (ip, port)
self.process_list = []
self.socket = socket()
a = 0
while True:
try:
self.socket.connect(self.address)
break
except ConnectionRefusedError:
if a == 5:
raise ConnectionRefusedError
a += 1
continue
self.queue_send = Queue()
self.queue_receive = Queue()
self.queue_front = Queue()
self.process_send = ProcessSend(self.queue_send, self.queue_front, self.socket)
self.process_receive = ProcessReceive(self.queue_receive, self.socket)
self.process = ProcessAll(self.queue_front, self.queue_send, self.queue_receive)
self.process_list.append(self.process_send)
self.process_list.append(self.process_receive)
self.process_list.append(self.process)
def send(self, jdata):
check_type(jdata, dict)
self.queue_send.put(jdata)
def terminate(self):
for a in self.process_list:
a.terminate()
def start(self):
for a in self.process_list:
a.start()
self.process.intergame.start()
self.process.intergame.game_to_server.end()
self.terminate()
self.socket.shutdown(1)
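# Usage sketch (hypothetical host/port): the constructor retries the TCP
# connect a few times before raising ConnectionRefusedError, and start()
# launches the send/receive/processing workers declared above.
if __name__ == "__main__":
    connection = ServerConnection("127.0.0.1", 5000)  # hypothetical address
    connection.start()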
| 30
| 88
| 0.627044
|
2391a4e8b73a7b6fd7af117cc9422ee579d79321
| 3,775
|
py
|
Python
|
homeassistant/components/sensor/zwave.py
|
hemantsangwan/home-assistant
|
28b397030d2f66bb084f80d8a237d0a2c11bac79
|
[
"MIT"
] | 2
|
2021-05-25T01:08:57.000Z
|
2022-01-09T21:02:46.000Z
|
homeassistant/components/sensor/zwave.py
|
hemantsangwan/home-assistant
|
28b397030d2f66bb084f80d8a237d0a2c11bac79
|
[
"MIT"
] | null | null | null |
homeassistant/components/sensor/zwave.py
|
hemantsangwan/home-assistant
|
28b397030d2f66bb084f80d8a237d0a2c11bac79
|
[
"MIT"
] | 1
|
2022-02-04T10:11:57.000Z
|
2022-02-04T10:11:57.000Z
|
"""
homeassistant.components.sensor.zwave
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Interfaces with Z-Wave sensors.
"""
# pylint: disable=import-error
from openzwave.network import ZWaveNetwork
from pydispatch import dispatcher
import homeassistant.components.zwave as zwave
from homeassistant.helpers.device import Device
from homeassistant.const import (
ATTR_BATTERY_LEVEL, ATTR_UNIT_OF_MEASUREMENT, STATE_ON, STATE_OFF,
TEMP_CELCIUS, TEMP_FAHRENHEIT, ATTR_LOCATION)
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up Z-Wave sensors. """
node = zwave.NETWORK.nodes[discovery_info[zwave.ATTR_NODE_ID]]
value = node.values[discovery_info[zwave.ATTR_VALUE_ID]]
value.set_change_verified(False)
if zwave.NETWORK.controller.node_id not in node.groups[1].associations:
node.groups[1].add_association(zwave.NETWORK.controller.node_id)
if value.command_class == zwave.COMMAND_CLASS_SENSOR_BINARY:
return [ZWaveBinarySensor(value)]
elif value.command_class == zwave.COMMAND_CLASS_SENSOR_MULTILEVEL:
return [ZWaveMultilevelSensor(value)]
class ZWaveSensor(Device):
""" Represents a Z-Wave sensor. """
def __init__(self, sensor_value):
self._value = sensor_value
self._node = sensor_value.node
dispatcher.connect(
self._value_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED)
@property
def should_poll(self):
""" False because we will push our own state to HA when changed. """
return False
@property
def unique_id(self):
""" Returns a unique id. """
return "ZWAVE-{}-{}".format(self._node.node_id, self._value.object_id)
@property
def name(self):
""" Returns the name of the device. """
name = self._node.name or "{} {}".format(
self._node.manufacturer_name, self._node.product_name)
return "{} {}".format(name, self._value.label)
@property
def state(self):
""" Returns the state of the sensor. """
return self._value.data
@property
def state_attributes(self):
""" Returns the state attributes. """
attrs = {
zwave.ATTR_NODE_ID: self._node.node_id,
}
battery_level = self._node.get_battery_level()
if battery_level is not None:
attrs[ATTR_BATTERY_LEVEL] = battery_level
unit = self.unit
if unit:
attrs[ATTR_UNIT_OF_MEASUREMENT] = unit
location = self._node.location
if location:
attrs[ATTR_LOCATION] = location
return attrs
@property
def unit(self):
""" Unit if sensor has one. """
return self._value.units
def _value_changed(self, value):
""" Called when a value has changed on the network. """
if self._value.value_id == value.value_id:
self.update_ha_state()
# pylint: disable=too-few-public-methods
class ZWaveBinarySensor(ZWaveSensor):
""" Represents a binary sensor within Z-Wave. """
@property
def state(self):
""" Returns the state of the sensor. """
return STATE_ON if self._value.data else STATE_OFF
class ZWaveMultilevelSensor(ZWaveSensor):
""" Represents a multi level sensor Z-Wave sensor. """
@property
def state(self):
""" Returns the state of the sensor. """
value = self._value.data
if self._value.units in ('C', 'F'):
return round(value, 1)
return value
@property
def unit(self):
""" Unit of this sensor. """
unit = self._value.units
if unit == 'C':
return TEMP_CELCIUS
elif unit == 'F':
return TEMP_FAHRENHEIT
else:
return unit
| 27.757353
| 78
| 0.639205
|
834181020b98b5a79dd72dd438c316cbb19abd09
| 5,102
|
py
|
Python
|
T2API/migrations/0001_initial.py
|
hackhb18-T2/api
|
c42be466492d07d6451ff3145985cd8cc0927257
|
[
"Apache-2.0"
] | null | null | null |
T2API/migrations/0001_initial.py
|
hackhb18-T2/api
|
c42be466492d07d6451ff3145985cd8cc0927257
|
[
"Apache-2.0"
] | null | null | null |
T2API/migrations/0001_initial.py
|
hackhb18-T2/api
|
c42be466492d07d6451ff3145985cd8cc0927257
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.0.2 on 2018-02-17 03:15
import django.contrib.auth.models
import django.contrib.auth.validators
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Device',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mac', models.CharField(max_length=12, unique=True)),
('resolution', models.CharField(max_length=15)),
('last_ping', models.DateTimeField(auto_now=True)),
('battery_status', models.CharField(max_length=20)),
],
options={
'ordering': ('mac',),
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150)),
('ean', models.CharField(max_length=13)),
],
),
migrations.CreateModel(
name='ApiUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False,
help_text='Designates that this user has all permissions without explicitly assigning them.',
verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'},
help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.',
max_length=150, unique=True,
validators=[django.contrib.auth.validators.UnicodeUsernameValidator()],
verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False,
help_text='Designates whether the user can log into this admin site.',
verbose_name='staff status')),
('is_active', models.BooleanField(default=True,
help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.',
verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True,
help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.',
related_name='user_set', related_query_name='user', to='auth.Group',
verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.',
related_name='user_set', related_query_name='user',
to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AddField(
model_name='device',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='devices',
to='T2API.Product'),
),
migrations.AddField(
model_name='device',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='devices',
to=settings.AUTH_USER_MODEL),
),
]
| 54.860215
| 164
| 0.525284
|
cb40d0155a934c13d9d4deddd580429b19dba719
| 2,171
|
py
|
Python
|
young/apps/users/migrations/0007_auto_20190104_1205.py
|
Aooyh/Young
|
223671c05085f79a9f12513ccc16abbb4ca801d4
|
[
"MIT"
] | 1
|
2019-04-20T07:54:21.000Z
|
2019-04-20T07:54:21.000Z
|
young/apps/users/migrations/0007_auto_20190104_1205.py
|
Aooyh/Young
|
223671c05085f79a9f12513ccc16abbb4ca801d4
|
[
"MIT"
] | null | null | null |
young/apps/users/migrations/0007_auto_20190104_1205.py
|
Aooyh/Young
|
223671c05085f79a9f12513ccc16abbb4ca801d4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-01-04 12:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('users', '0006_article_focus_url'),
]
operations = [
migrations.AddField(
model_name='article',
name='trans_count',
field=models.IntegerField(default=0, verbose_name='转发数'),
),
migrations.AddField(
model_name='comment',
name='author',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL, verbose_name='评论者'),
),
migrations.AddField(
model_name='comment',
name='content',
field=models.CharField(max_length=500, null=True, verbose_name='评论内容'),
),
migrations.AddField(
model_name='comment',
name='create_time',
field=models.DateField(auto_now=True, verbose_name='创建时间'),
),
migrations.AddField(
model_name='comment',
name='update_time',
field=models.DateField(default=django.utils.timezone.now, verbose_name='更新时间'),
),
migrations.AddField(
model_name='user',
name='collect_articles',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='users.Article', verbose_name='收藏文臧'),
),
migrations.AddField(
model_name='user',
name='like_articles',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='like_users', to='users.Article', verbose_name='点赞文章'),
),
migrations.AddField(
model_name='user',
name='like_comments',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='like_users', to='users.Comment', verbose_name='点赞评论'),
),
]
| 36.79661
| 166
| 0.620912
|
c23f8f47befa30db6d6758c8d6c0e1243c037b84
| 19,914
|
py
|
Python
|
homeassistant/components/netatmo/climate.py
|
pszafer/core
|
ab6fb5cb7705a7f2e3a4f310b5d42047c3372bd2
|
[
"Apache-2.0"
] | 3
|
2019-07-06T08:00:22.000Z
|
2021-11-12T23:01:59.000Z
|
homeassistant/components/netatmo/climate.py
|
pszafer/core
|
ab6fb5cb7705a7f2e3a4f310b5d42047c3372bd2
|
[
"Apache-2.0"
] | 44
|
2020-08-03T07:31:07.000Z
|
2022-03-31T06:02:04.000Z
|
homeassistant/components/netatmo/climate.py
|
titilambert/home-assistant
|
a2651845f379992231fd7b9c8458828036296ee0
|
[
"Apache-2.0"
] | 2
|
2017-09-03T16:06:02.000Z
|
2021-01-12T15:07:52.000Z
|
"""Support for Netatmo Smart thermostats."""
import logging
from typing import List, Optional
import voluptuous as vol
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
DEFAULT_MIN_TEMP,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_TEMPERATURE,
PRECISION_HALVES,
STATE_OFF,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import (
ATTR_HEATING_POWER_REQUEST,
ATTR_SCHEDULE_NAME,
DATA_HANDLER,
DATA_HOMES,
DATA_SCHEDULES,
DOMAIN,
EVENT_TYPE_CANCEL_SET_POINT,
EVENT_TYPE_SET_POINT,
EVENT_TYPE_THERM_MODE,
MANUFACTURER,
SERVICE_SET_SCHEDULE,
SIGNAL_NAME,
)
from .data_handler import HOMEDATA_DATA_CLASS_NAME, HOMESTATUS_DATA_CLASS_NAME
from .netatmo_entity_base import NetatmoBase
_LOGGER = logging.getLogger(__name__)
PRESET_FROST_GUARD = "Frost Guard"
PRESET_SCHEDULE = "Schedule"
PRESET_MANUAL = "Manual"
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
SUPPORT_HVAC = [HVAC_MODE_HEAT, HVAC_MODE_AUTO, HVAC_MODE_OFF]
SUPPORT_PRESET = [PRESET_AWAY, PRESET_BOOST, PRESET_FROST_GUARD, PRESET_SCHEDULE]
STATE_NETATMO_SCHEDULE = "schedule"
STATE_NETATMO_HG = "hg"
STATE_NETATMO_MAX = "max"
STATE_NETATMO_AWAY = PRESET_AWAY
STATE_NETATMO_OFF = STATE_OFF
STATE_NETATMO_MANUAL = "manual"
STATE_NETATMO_HOME = "home"
PRESET_MAP_NETATMO = {
PRESET_FROST_GUARD: STATE_NETATMO_HG,
PRESET_BOOST: STATE_NETATMO_MAX,
PRESET_SCHEDULE: STATE_NETATMO_SCHEDULE,
PRESET_AWAY: STATE_NETATMO_AWAY,
STATE_NETATMO_OFF: STATE_NETATMO_OFF,
}
NETATMO_MAP_PRESET = {
STATE_NETATMO_HG: PRESET_FROST_GUARD,
STATE_NETATMO_MAX: PRESET_BOOST,
STATE_NETATMO_SCHEDULE: PRESET_SCHEDULE,
STATE_NETATMO_AWAY: PRESET_AWAY,
STATE_NETATMO_OFF: STATE_NETATMO_OFF,
STATE_NETATMO_MANUAL: STATE_NETATMO_MANUAL,
}
HVAC_MAP_NETATMO = {
PRESET_SCHEDULE: HVAC_MODE_AUTO,
STATE_NETATMO_HG: HVAC_MODE_AUTO,
PRESET_FROST_GUARD: HVAC_MODE_AUTO,
PRESET_BOOST: HVAC_MODE_HEAT,
STATE_NETATMO_OFF: HVAC_MODE_OFF,
STATE_NETATMO_MANUAL: HVAC_MODE_AUTO,
PRESET_MANUAL: HVAC_MODE_AUTO,
STATE_NETATMO_AWAY: HVAC_MODE_AUTO,
}
CURRENT_HVAC_MAP_NETATMO = {True: CURRENT_HVAC_HEAT, False: CURRENT_HVAC_IDLE}
DEFAULT_MAX_TEMP = 30
NA_THERM = "NATherm1"
NA_VALVE = "NRV"
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the Netatmo energy platform."""
data_handler = hass.data[DOMAIN][entry.entry_id][DATA_HANDLER]
await data_handler.register_data_class(
HOMEDATA_DATA_CLASS_NAME, HOMEDATA_DATA_CLASS_NAME, None
)
home_data = data_handler.data.get(HOMEDATA_DATA_CLASS_NAME)
if not home_data:
return
async def get_entities():
"""Retrieve Netatmo entities."""
entities = []
for home_id in get_all_home_ids(home_data):
_LOGGER.debug("Setting up home %s ...", home_id)
for room_id in home_data.rooms[home_id].keys():
room_name = home_data.rooms[home_id][room_id]["name"]
_LOGGER.debug("Setting up room %s (%s) ...", room_name, room_id)
signal_name = f"{HOMESTATUS_DATA_CLASS_NAME}-{home_id}"
await data_handler.register_data_class(
HOMESTATUS_DATA_CLASS_NAME, signal_name, None, home_id=home_id
)
home_status = data_handler.data.get(signal_name)
if home_status and room_id in home_status.rooms:
entities.append(NetatmoThermostat(data_handler, home_id, room_id))
hass.data[DOMAIN][DATA_SCHEDULES][home_id] = {
schedule_id: schedule_data.get("name")
for schedule_id, schedule_data in (
data_handler.data[HOMEDATA_DATA_CLASS_NAME]
.schedules[home_id]
.items()
)
}
hass.data[DOMAIN][DATA_HOMES] = {
home_id: home_data.get("name")
for home_id, home_data in (
data_handler.data[HOMEDATA_DATA_CLASS_NAME].homes.items()
)
}
return entities
async_add_entities(await get_entities(), True)
platform = entity_platform.current_platform.get()
if home_data is not None:
platform.async_register_entity_service(
SERVICE_SET_SCHEDULE,
{vol.Required(ATTR_SCHEDULE_NAME): cv.string},
"_service_set_schedule",
)
class NetatmoThermostat(NetatmoBase, ClimateEntity):
"""Representation a Netatmo thermostat."""
def __init__(self, data_handler, home_id, room_id):
"""Initialize the sensor."""
ClimateEntity.__init__(self)
super().__init__(data_handler)
self._id = room_id
self._home_id = home_id
self._home_status_class = f"{HOMESTATUS_DATA_CLASS_NAME}-{self._home_id}"
self._data_classes.extend(
[
{
"name": HOMEDATA_DATA_CLASS_NAME,
SIGNAL_NAME: HOMEDATA_DATA_CLASS_NAME,
},
{
"name": HOMESTATUS_DATA_CLASS_NAME,
"home_id": self._home_id,
SIGNAL_NAME: self._home_status_class,
},
]
)
self._home_status = self.data_handler.data[self._home_status_class]
self._room_status = self._home_status.rooms[room_id]
self._room_data = self._data.rooms[home_id][room_id]
self._model = NA_VALVE
for module in self._room_data.get("module_ids"):
if self._home_status.thermostats.get(module):
self._model = NA_THERM
break
self._state = None
self._device_name = self._data.rooms[home_id][room_id]["name"]
self._name = f"{MANUFACTURER} {self._device_name}"
self._current_temperature = None
self._target_temperature = None
self._preset = None
self._away = None
self._operation_list = [HVAC_MODE_AUTO, HVAC_MODE_HEAT]
self._support_flags = SUPPORT_FLAGS
self._hvac_mode = None
self._battery_level = None
self._connected = None
self._away_temperature = None
self._hg_temperature = None
self._boilerstatus = None
self._setpoint_duration = None
if self._model == NA_THERM:
self._operation_list.append(HVAC_MODE_OFF)
self._unique_id = f"{self._id}-{self._model}"
async def async_added_to_hass(self) -> None:
"""Entity created."""
await super().async_added_to_hass()
for event_type in (
EVENT_TYPE_SET_POINT,
EVENT_TYPE_THERM_MODE,
EVENT_TYPE_CANCEL_SET_POINT,
):
self._listeners.append(
async_dispatcher_connect(
self.hass,
f"signal-{DOMAIN}-webhook-{event_type}",
self.handle_event,
)
)
async def handle_event(self, event):
"""Handle webhook events."""
data = event["data"]
if not data.get("home"):
return
home = data["home"]
if self._home_id == home["id"] and data["event_type"] == EVENT_TYPE_THERM_MODE:
self._preset = NETATMO_MAP_PRESET[home[EVENT_TYPE_THERM_MODE]]
self._hvac_mode = HVAC_MAP_NETATMO[self._preset]
if self._preset == PRESET_FROST_GUARD:
self._target_temperature = self._hg_temperature
elif self._preset == PRESET_AWAY:
self._target_temperature = self._away_temperature
elif self._preset == PRESET_SCHEDULE:
self.async_update_callback()
self.async_write_ha_state()
return
if not home.get("rooms"):
return
for room in home["rooms"]:
if data["event_type"] == EVENT_TYPE_SET_POINT:
if self._id == room["id"]:
if room["therm_setpoint_mode"] == STATE_NETATMO_OFF:
self._hvac_mode = HVAC_MODE_OFF
elif room["therm_setpoint_mode"] == STATE_NETATMO_MAX:
self._hvac_mode = HVAC_MODE_HEAT
self._target_temperature = DEFAULT_MAX_TEMP
else:
self._target_temperature = room["therm_setpoint_temperature"]
self.async_write_ha_state()
break
elif data["event_type"] == EVENT_TYPE_CANCEL_SET_POINT:
if self._id == room["id"]:
self.async_update_callback()
self.async_write_ha_state()
break
@property
def supported_features(self):
"""Return the list of supported features."""
return self._support_flags
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def target_temperature_step(self) -> Optional[float]:
"""Return the supported step of target temperature."""
return PRECISION_HALVES
@property
def hvac_mode(self):
"""Return hvac operation ie. heat, cool mode."""
return self._hvac_mode
@property
def hvac_modes(self):
"""Return the list of available hvac operation modes."""
return self._operation_list
@property
def hvac_action(self) -> Optional[str]:
"""Return the current running hvac operation if supported."""
if self._model == NA_THERM:
return CURRENT_HVAC_MAP_NETATMO[self._boilerstatus]
# Maybe it is a valve
if self._room_status and self._room_status.get("heating_power_request", 0) > 0:
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_IDLE
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
if hvac_mode == HVAC_MODE_OFF:
self.turn_off()
elif hvac_mode == HVAC_MODE_AUTO:
if self.hvac_mode == HVAC_MODE_OFF:
self.turn_on()
self.set_preset_mode(PRESET_SCHEDULE)
elif hvac_mode == HVAC_MODE_HEAT:
self.set_preset_mode(PRESET_BOOST)
def set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if self.target_temperature == 0:
self._home_status.set_room_thermpoint(
self._id, STATE_NETATMO_HOME,
)
if preset_mode in [PRESET_BOOST, STATE_NETATMO_MAX] and self._model == NA_VALVE:
self._home_status.set_room_thermpoint(
self._id, STATE_NETATMO_MANUAL, DEFAULT_MAX_TEMP,
)
elif preset_mode in [PRESET_BOOST, STATE_NETATMO_MAX]:
self._home_status.set_room_thermpoint(
self._id, PRESET_MAP_NETATMO[preset_mode]
)
elif preset_mode in [PRESET_SCHEDULE, PRESET_FROST_GUARD, PRESET_AWAY]:
self._home_status.set_thermmode(PRESET_MAP_NETATMO[preset_mode])
else:
_LOGGER.error("Preset mode '%s' not available", preset_mode)
self.async_write_ha_state()
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode, e.g., home, away, temp."""
return self._preset
@property
def preset_modes(self) -> Optional[List[str]]:
"""Return a list of available preset modes."""
return SUPPORT_PRESET
def set_temperature(self, **kwargs):
"""Set new target temperature for 2 hours."""
temp = kwargs.get(ATTR_TEMPERATURE)
if temp is None:
return
self._home_status.set_room_thermpoint(self._id, STATE_NETATMO_MANUAL, temp)
self.async_write_ha_state()
@property
def device_state_attributes(self):
"""Return the state attributes of the thermostat."""
attr = {}
if self._battery_level is not None:
attr[ATTR_BATTERY_LEVEL] = self._battery_level
if self._model == NA_VALVE:
attr[ATTR_HEATING_POWER_REQUEST] = self._room_status.get(
"heating_power_request", 0
)
return attr
def turn_off(self):
"""Turn the entity off."""
if self._model == NA_VALVE:
self._home_status.set_room_thermpoint(
self._id, STATE_NETATMO_MANUAL, DEFAULT_MIN_TEMP,
)
elif self.hvac_mode != HVAC_MODE_OFF:
self._home_status.set_room_thermpoint(self._id, STATE_NETATMO_OFF)
self.async_write_ha_state()
def turn_on(self):
"""Turn the entity on."""
self._home_status.set_room_thermpoint(self._id, STATE_NETATMO_HOME)
self.async_write_ha_state()
@property
def available(self) -> bool:
"""If the device hasn't been able to connect, mark as unavailable."""
return bool(self._connected)
@callback
def async_update_callback(self):
"""Update the entity's state."""
self._home_status = self.data_handler.data[self._home_status_class]
self._room_status = self._home_status.rooms.get(self._id)
self._room_data = self._data.rooms.get(self._home_id, {}).get(self._id)
if not self._room_status or not self._room_data:
if self._connected:
_LOGGER.info(
"The thermostat in room %s seems to be out of reach",
self._device_name,
)
self._connected = False
return
roomstatus = {"roomID": self._room_status.get("id", {})}
if self._room_status.get("reachable"):
roomstatus.update(self._build_room_status())
self._away_temperature = self._data.get_away_temp(self._home_id)
self._hg_temperature = self._data.get_hg_temp(self._home_id)
self._setpoint_duration = self._data.setpoint_duration[self._home_id]
if "current_temperature" not in roomstatus:
return
if self._model is None:
self._model = roomstatus["module_type"]
self._current_temperature = roomstatus["current_temperature"]
self._target_temperature = roomstatus["target_temperature"]
self._preset = NETATMO_MAP_PRESET[roomstatus["setpoint_mode"]]
self._hvac_mode = HVAC_MAP_NETATMO[self._preset]
self._battery_level = roomstatus.get("battery_level")
self._connected = True
self._away = self._hvac_mode == HVAC_MAP_NETATMO[STATE_NETATMO_AWAY]
def _build_room_status(self):
"""Construct room status."""
try:
roomstatus = {
"roomname": self._room_data["name"],
"target_temperature": self._room_status["therm_setpoint_temperature"],
"setpoint_mode": self._room_status["therm_setpoint_mode"],
"current_temperature": self._room_status["therm_measured_temperature"],
"module_type": self._data.get_thermostat_type(
home_id=self._home_id, room_id=self._id
),
"module_id": None,
"heating_status": None,
"heating_power_request": None,
}
batterylevel = None
for module_id in self._room_data["module_ids"]:
if (
self._data.modules[self._home_id][module_id]["type"] == NA_THERM
or roomstatus["module_id"] is None
):
roomstatus["module_id"] = module_id
if roomstatus["module_type"] == NA_THERM:
self._boilerstatus = self._home_status.boiler_status(
roomstatus["module_id"]
)
roomstatus["heating_status"] = self._boilerstatus
batterylevel = self._home_status.thermostats[
roomstatus["module_id"]
].get("battery_level")
elif roomstatus["module_type"] == NA_VALVE:
roomstatus["heating_power_request"] = self._room_status[
"heating_power_request"
]
roomstatus["heating_status"] = roomstatus["heating_power_request"] > 0
if self._boilerstatus is not None:
roomstatus["heating_status"] = (
self._boilerstatus and roomstatus["heating_status"]
)
batterylevel = self._home_status.valves[roomstatus["module_id"]].get(
"battery_level"
)
if batterylevel:
batterypct = interpolate(batterylevel, roomstatus["module_type"])
if (
not roomstatus.get("battery_level")
or batterypct < roomstatus["battery_level"]
):
roomstatus["battery_level"] = batterypct
return roomstatus
except KeyError as err:
_LOGGER.error("Update of room %s failed. Error: %s", self._id, err)
return {}
def _service_set_schedule(self, **kwargs):
schedule_name = kwargs.get(ATTR_SCHEDULE_NAME)
schedule_id = None
for sid, name in self.hass.data[DOMAIN][DATA_SCHEDULES][self._home_id].items():
if name == schedule_name:
schedule_id = sid
if not schedule_id:
_LOGGER.error("You passed an invalid schedule")
return
self._data.switch_home_schedule(home_id=self._home_id, schedule_id=schedule_id)
_LOGGER.debug(
"Setting %s schedule to %s (%s)",
self._home_id,
kwargs.get(ATTR_SCHEDULE_NAME),
schedule_id,
)
def interpolate(batterylevel, module_type):
"""Interpolate battery level depending on device type."""
na_battery_levels = {
NA_THERM: {
"full": 4100,
"high": 3600,
"medium": 3300,
"low": 3000,
"empty": 2800,
},
NA_VALVE: {
"full": 3200,
"high": 2700,
"medium": 2400,
"low": 2200,
"empty": 2200,
},
}
levels = sorted(na_battery_levels[module_type].values())
steps = [20, 50, 80, 100]
na_battery_level = na_battery_levels[module_type]
if batterylevel >= na_battery_level["full"]:
return 100
if batterylevel >= na_battery_level["high"]:
i = 3
elif batterylevel >= na_battery_level["medium"]:
i = 2
elif batterylevel >= na_battery_level["low"]:
i = 1
else:
return 0
pct = steps[i - 1] + (
(steps[i] - steps[i - 1])
* (batterylevel - levels[i])
/ (levels[i + 1] - levels[i])
)
return int(pct)
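    # Worked example with the thresholds above: a NA_THERM reading of 3900 lies between
    # "high" (3600) and "full" (4100), so this returns 80 + 20 * (3900 - 3600) / (4100 - 3600) = 92.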
def get_all_home_ids(home_data):
"""Get all the home ids returned by NetAtmo API."""
if home_data is None:
return []
return [
home_data.homes[home_id]["id"]
for home_id in home_data.homes
if (
"therm_schedules" in home_data.homes[home_id]
and "modules" in home_data.homes[home_id]
)
]
| 34.099315
| 88
| 0.615547
|
99acede90e68c6a9be099056d7bbdaa7b7a6328b
| 5,377
|
py
|
Python
|
samcli/local/common/runtime_template.py
|
darthzyklus/aws-sam-cli
|
9c5546801e4c46a79ffc2d1f1d1e5005d0451bf0
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 859
|
2020-08-25T03:53:17.000Z
|
2022-03-31T12:33:07.000Z
|
samcli/local/common/runtime_template.py
|
darthzyklus/aws-sam-cli
|
9c5546801e4c46a79ffc2d1f1d1e5005d0451bf0
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1,369
|
2020-08-25T10:57:03.000Z
|
2022-03-31T23:00:25.000Z
|
samcli/local/common/runtime_template.py
|
darthzyklus/aws-sam-cli
|
9c5546801e4c46a79ffc2d1f1d1e5005d0451bf0
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 275
|
2020-08-25T19:33:50.000Z
|
2022-03-26T08:32:52.000Z
|
"""
All-in-one metadata about runtimes
"""
import itertools
import os
import pathlib
from typing import Set
_init_path = str(pathlib.Path(os.path.dirname(__file__)).parent.parent)
_templates = os.path.join(_init_path, "lib", "init", "templates")
_lambda_images_templates = os.path.join(_init_path, "lib", "init", "image_templates")
# Note(TheSriram): The ordering of the runtimes list per language is based on the latest to oldest.
RUNTIME_DEP_TEMPLATE_MAPPING = {
"python": [
{
"runtimes": ["python3.9", "python3.8", "python3.7", "python3.6", "python2.7"],
"dependency_manager": "pip",
"init_location": os.path.join(_templates, "cookiecutter-aws-sam-hello-python"),
"build": True,
}
],
"ruby": [
{
"runtimes": ["ruby2.5", "ruby2.7"],
"dependency_manager": "bundler",
"init_location": os.path.join(_templates, "cookiecutter-aws-sam-hello-ruby"),
"build": True,
}
],
"nodejs": [
{
"runtimes": ["nodejs14.x", "nodejs12.x", "nodejs10.x"],
"dependency_manager": "npm",
"init_location": os.path.join(_templates, "cookiecutter-aws-sam-hello-nodejs"),
"build": True,
}
],
"dotnet": [
{
"runtimes": ["dotnetcore3.1", "dotnetcore2.1"],
"dependency_manager": "cli-package",
"init_location": os.path.join(_templates, "cookiecutter-aws-sam-hello-dotnet"),
"build": True,
}
],
"go": [
{
"runtimes": ["go1.x"],
"dependency_manager": "mod",
"init_location": os.path.join(_templates, "cookiecutter-aws-sam-hello-golang"),
"build": False,
}
],
"java": [
{
"runtimes": ["java11", "java8", "java8.al2"],
"dependency_manager": "maven",
"init_location": os.path.join(_templates, "cookiecutter-aws-sam-hello-java-maven"),
"build": True,
},
{
"runtimes": ["java11", "java8", "java8.al2"],
"dependency_manager": "gradle",
"init_location": os.path.join(_templates, "cookiecutter-aws-sam-hello-java-gradle"),
"build": True,
},
],
}
def get_local_lambda_images_location(mapping, runtime):
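    # Resolve the local directory holding the Lambda-image variant of a runtime's init
    # template, appending the "-lambda-image" suffix when it is not already present.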
dir_name = os.path.basename(mapping["init_location"])
if dir_name.endswith("-lambda-image"):
return os.path.join(_lambda_images_templates, runtime, dir_name)
return os.path.join(_lambda_images_templates, runtime, dir_name + "-lambda-image")
RUNTIME_TO_DEPENDENCY_MANAGERS = {
"python3.9": ["pip"],
"python3.8": ["pip"],
"python3.7": ["pip"],
"python3.6": ["pip"],
"python2.7": ["pip"],
"ruby2.5": ["bundler"],
"ruby2.7": ["bundler"],
"nodejs14.x": ["npm"],
"nodejs12.x": ["npm"],
"nodejs10.x": ["npm"],
"dotnetcore3.1": ["cli-package"],
"dotnetcore2.1": ["cli-package"],
"go1.x": ["mod"],
"java8": ["maven", "gradle"],
"java11": ["maven", "gradle"],
"java8.al2": ["maven", "gradle"],
}
SUPPORTED_DEP_MANAGERS: Set[str] = {
c["dependency_manager"] # type: ignore
for c in list(itertools.chain(*(RUNTIME_DEP_TEMPLATE_MAPPING.values())))
if c["dependency_manager"]
}
RUNTIMES: Set[str] = set(
itertools.chain(
*[c["runtimes"] for c in list(itertools.chain(*(RUNTIME_DEP_TEMPLATE_MAPPING.values())))] # type: ignore
)
)
# When adding new Lambda runtimes, please update SAM_RUNTIME_TO_SCHEMAS_CODE_LANG_MAPPING
# Runtimes are ordered in alphabetical fashion with reverse version order (latest versions first)
INIT_RUNTIMES = [
# dotnetcore runtimes in descending order
"dotnet5.0",
"dotnetcore3.1",
"dotnetcore2.1",
"go1.x",
# java runtimes in descending order
"java11",
"java8.al2",
"java8",
# nodejs runtimes in descending order
"nodejs14.x",
"nodejs12.x",
"nodejs10.x",
# python runtimes in descending order
"python3.9",
"python3.8",
"python3.7",
"python3.6",
"python2.7",
# ruby runtimes in descending order
"ruby2.7",
"ruby2.5",
]
LAMBDA_IMAGES_RUNTIMES_MAP = {
"dotnet5.0": "amazon/dotnet5.0-base",
"dotnetcore3.1": "amazon/dotnetcore3.1-base",
"dotnetcore2.1": "amazon/dotnetcore2.1-base",
"go1.x": "amazon/go1.x-base",
"java11": "amazon/java11-base",
"java8.al2": "amazon/java8.al2-base",
"java8": "amazon/java8-base",
"nodejs14.x": "amazon/nodejs14.x-base",
"nodejs12.x": "amazon/nodejs12.x-base",
"nodejs10.x": "amazon/nodejs10.x-base",
"python3.9": "amazon/python3.9-base",
"python3.8": "amazon/python3.8-base",
"python3.7": "amazon/python3.7-base",
"python3.6": "amazon/python3.6-base",
"python2.7": "amazon/python2.7-base",
"ruby2.7": "amazon/ruby2.7-base",
"ruby2.5": "amazon/ruby2.5-base",
}
LAMBDA_IMAGES_RUNTIMES = LAMBDA_IMAGES_RUNTIMES_MAP.values()
# Schemas Code lang is a MINIMUM supported version
# - this is why later Lambda runtimes can be mapped to earlier Schemas Code Languages
SAM_RUNTIME_TO_SCHEMAS_CODE_LANG_MAPPING = {
"java8": "Java8",
"java8.al2": "Java8",
"java11": "Java8",
"python3.7": "Python36",
"python3.6": "Python36",
"python3.8": "Python36",
"python3.9": "Python36",
}
| 31.080925
| 113
| 0.598103
|
4c42d650b64e321e3fd971fbd810c8c8bdb75819
| 382
|
py
|
Python
|
Binary Number with Alternating Bits (693)/693.py
|
WeaverDyl/LeetCode-Solutions
|
5b37e26390bcda69ffcb1142cc03e193f74735e6
|
[
"MIT"
] | null | null | null |
Binary Number with Alternating Bits (693)/693.py
|
WeaverDyl/LeetCode-Solutions
|
5b37e26390bcda69ffcb1142cc03e193f74735e6
|
[
"MIT"
] | null | null | null |
Binary Number with Alternating Bits (693)/693.py
|
WeaverDyl/LeetCode-Solutions
|
5b37e26390bcda69ffcb1142cc03e193f74735e6
|
[
"MIT"
] | null | null | null |
# Runtime: 20 ms
# Beats 100% of Python submissions
class Solution(object):
def hasAlternatingBits(self, n):
"""
:type n: int
:rtype: bool
"""
while n != 0:
prev_bit = n & 1
n = n >> 1
curr_bit = n & 1
if prev_bit == curr_bit:
return False
return True
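        # Alternative constant-time check: if the bits alternate, x = n ^ (n >> 1)
        # is all ones, so (x & (x + 1)) == 0.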
| 21.222222
| 36
| 0.442408
|
a7085f7cd78ca29a2b4e8567acab231c470f34cb
| 80
|
py
|
Python
|
ocradmin/projects/tasks.py
|
mikesname/ocropodium
|
a3e379cca38dc1999349bf4e9b5608e81dc54b10
|
[
"Apache-2.0"
] | 1
|
2018-04-18T20:39:02.000Z
|
2018-04-18T20:39:02.000Z
|
ocradmin/projects/tasks.py
|
mikesname/ocropodium
|
a3e379cca38dc1999349bf4e9b5608e81dc54b10
|
[
"Apache-2.0"
] | null | null | null |
ocradmin/projects/tasks.py
|
mikesname/ocropodium
|
a3e379cca38dc1999349bf4e9b5608e81dc54b10
|
[
"Apache-2.0"
] | null | null | null |
"""
Celery functions to be processed in a non-blocking distributed manner.
"""
| 16
| 70
| 0.7375
|
42daa578772ffe2930eb1ea191563bdef9b6ed0b
| 516
|
py
|
Python
|
advance_py/04 Collections/defaultdict_start.py
|
joejoeyjoseph/playground
|
fa739d51635823b866fafd1e712760074cfc175c
|
[
"MIT"
] | null | null | null |
advance_py/04 Collections/defaultdict_start.py
|
joejoeyjoseph/playground
|
fa739d51635823b866fafd1e712760074cfc175c
|
[
"MIT"
] | null | null | null |
advance_py/04 Collections/defaultdict_start.py
|
joejoeyjoseph/playground
|
fa739d51635823b866fafd1e712760074cfc175c
|
[
"MIT"
] | null | null | null |
# Demonstrate the usage of defaultdict objects
from collections import defaultdict
def main():
# define a list of items that we want to count
fruits = ['apple', 'pear', 'orange', 'banana',
'apple', 'grape', 'banana', 'banana']
    # use a defaultdict so that missing keys start at 0 instead of raising KeyError
    fruitCounter = defaultdict(int)
# Count the elements in the list
for fruit in fruits:
fruitCounter[fruit] += 1
# print the result
for (k, v) in fruitCounter.items():
print(k + ": " + str(v))
if __name__ == "__main__":
main()
| 22.434783
| 51
| 0.587209
|
c7217f0c821779672fd2e857185c15af50449bad
| 12,005
|
py
|
Python
|
bert/run_bert.py
|
qianyingw/bioner
|
8f62f1871497b16e0e7bd7c083f758a01657cbcc
|
[
"MIT"
] | 1
|
2021-08-23T14:24:33.000Z
|
2021-08-23T14:24:33.000Z
|
bert/run_bert.py
|
qianyingw/pre-pico
|
8f62f1871497b16e0e7bd7c083f758a01657cbcc
|
[
"MIT"
] | null | null | null |
bert/run_bert.py
|
qianyingw/pre-pico
|
8f62f1871497b16e0e7bd7c083f758a01657cbcc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 28 17:02:50 2020
@author: qwang
"""
import os
os.chdir("/home/qwang/pre-pico/bert")
# import sys
# sys.path[0] = sys.path[0][:-5]
import random
import json
import torch
from torch.utils.data import DataLoader
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import DistilBertTokenizerFast, DistilBertForTokenClassification
from transformers import AdamW, get_linear_schedule_with_warmup
from bert_args import get_args
import bert_utils
from bert_utils import tokenize_encode, EncodingDataset, PadDoc, plot_prfs
import bert_fn
import bert_crf_fn
from bert_model import BERT_CRF, BERT_LSTM_CRF, Distil_CRF
#%% Load data
args = get_args()
args.epochs = 20
args.lr = 1e-3
args.warm_frac = 0.1 # 0.1
args.exp_dir = "/media/mynewdrive/pico/exp/bert_crf/temp"
args.pre_wgts = 'pubmed-full' # ['distil', 'bert', 'biobert', 'pubmed-full', 'pubmed-abs']
args.model = 'bert_crf' # ['bert', 'bert_crf', 'bert_lstm_crf', 'distil', 'distil_crf']
args.save_model = True
with open('/media/mynewdrive/pico/exp/bert_crf/bc6_full/bc6_full_prfs.json') as f:
js = json.load(f)
from argparse import Namespace
args = Namespace(**js['args'])
idx2tag = js['idx2tag']
idx2tag = {int(idx): tag for idx, tag in idx2tag.items()}
tag2idx = {tag: idx for idx, tag in idx2tag.items()}
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False # This makes things slower
# Load json file
json_path = os.path.join(args.data_dir, "tsv/18mar_output/pico_18mar.json")
# json_path = os.path.join(args.data_dir, "tsv/output/b1.json")
train_seqs, train_tags = bert_utils.load_pico(json_path, group='train')
valid_seqs, valid_tags = bert_utils.load_pico(json_path, group='valid')
test_seqs, test_tags = bert_utils.load_pico(json_path, group='test')
# train_data = bert_utils.load_conll(os.path.join(args.data_dir, "train.txt"))
# valid_data = bert_utils.load_conll(os.path.join(args.data_dir, "valid.txt"))
# test_data = bert_utils.load_conll(os.path.join(args.data_dir, "test.txt"))
# Unique tags
# all_tags = train_tags + valid_tags
# tag_set = set(t for tags in all_tags for t in tags)
# tag2idx = {tag: idx for idx, tag in enumerate(tag_set)}
# idx2tag = {idx: tag for tag, idx in tag2idx.items()}
#%% Encoding and DataLoader
n_tags = 13
# Define 'Fast' Tokenizer
if args.pre_wgts == 'distil':
# tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased', num_labels=n_tags)
elif args.pre_wgts == 'biobert':
tokenizer = BertTokenizerFast.from_pretrained('dmis-lab/biobert-v1.1', num_labels=n_tags)
elif args.pre_wgts == 'pubmed-full':
tokenizer = BertTokenizerFast.from_pretrained('microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext', num_labels=n_tags)
elif args.pre_wgts == 'pubmed-abs':
tokenizer = BertTokenizerFast.from_pretrained('microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract', num_labels=n_tags)
else: # args.pre_wgts == 'bert-base'
# tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased', num_labels=n_tags)
# Tokenize seqs & encode tags (set tags for non-first sub-tokens to -100)
train_inputs = tokenize_encode(train_seqs, train_tags, tag2idx, tokenizer)
valid_inputs = tokenize_encode(valid_seqs, valid_tags, tag2idx, tokenizer)
test_inputs = tokenize_encode(test_seqs, test_tags, tag2idx, tokenizer)
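# Note: labelling non-first sub-tokens with -100 follows the usual ignore_index
# convention so the loss skips them; e.g. a word split into three wordpieces keeps
# its tag on the first piece and gets -100 on the remaining two.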
# Torch Dataset
train_dataset = EncodingDataset(train_inputs)
valid_dataset = EncodingDataset(valid_inputs)
test_dataset = EncodingDataset(test_inputs)
# temp = train_dataset[99]
# temp['tags']
# temp['input_ids']
# temp['attention_mask']
# temp['tags']
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, collate_fn=PadDoc())
valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=True, collate_fn=PadDoc())
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True, collate_fn=PadDoc())
# batch = next(iter(train_loader))
# input_ids_batch, attn_masks_batch, tags_batch, lens = batch
#%% Model & Optimizer & Scheduler
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
if args.pre_wgts == 'distil':
pre_wgts = "distilbert-base-uncased"
elif args.pre_wgts == 'biobert':
pre_wgts = "dmis-lab/biobert-v1.1"
elif args.pre_wgts == 'pubmed-full':
pre_wgts = "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext"
elif args.pre_wgts == 'pubmed-abs':
pre_wgts = "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract"
else: # args.pre_wgts == 'bert-base'
pre_wgts = "bert-base-uncased"
if args.model == 'bert':
model = BertForTokenClassification.from_pretrained(pre_wgts, num_labels=n_tags)
if args.model == 'bert_crf':
model = BERT_CRF.from_pretrained(pre_wgts, num_labels=n_tags)
if args.model == 'bert_lstm_crf':
model = BERT_LSTM_CRF.from_pretrained(pre_wgts, num_labels=n_tags)
if args.model == 'distil':
model = DistilBertForTokenClassification.from_pretrained(pre_wgts, num_labels=n_tags)
if args.model == 'distil_crf':
model = Distil_CRF.from_pretrained(pre_wgts, num_labels=n_tags)
model.to(device)
optimizer = AdamW(model.parameters(), lr=args.lr)
# Slanted triangular Learning rate scheduler
total_steps = len(train_loader) * args.epochs // args.accum_step
warm_steps = int(total_steps * args.warm_frac)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warm_steps, num_training_steps=total_steps)
#%% Train the model
if os.path.exists(args.exp_dir) == False:
os.makedirs(args.exp_dir)
# Create args and output dictionary (for json output)
output_dict = {'args': vars(args), 'prfs': {}}
# For early stopping
n_worse = 0
min_valid_loss = float('inf')
for epoch in range(args.epochs):
if args.model in ['distil_crf', 'bert_crf', 'bert_lstm_crf']:
train_scores = bert_crf_fn.train_fn(model, train_loader, idx2tag, optimizer, scheduler, tokenizer, args.clip, args.accum_step, device)
valid_scores = bert_crf_fn.valid_fn(model, valid_loader, idx2tag, tokenizer, device)
if args.model in ['distil', 'bert']:
train_scores = bert_fn.train_fn(model, train_loader, idx2tag, optimizer, scheduler, tokenizer, args.clip, args.accum_step, device)
valid_scores = bert_fn.valid_fn(model, valid_loader, idx2tag, tokenizer, device)
# Update output dictionary
output_dict['prfs'][str('train_'+str(epoch+1))] = train_scores
output_dict['prfs'][str('valid_'+str(epoch+1))] = valid_scores
# Save scores
is_best = (valid_scores['loss'] < min_valid_loss)
if is_best == True:
min_valid_loss = valid_scores['loss']
# Save model
if args.save_model == True:
bert_utils.save_checkpoint({'epoch': epoch+1,
'state_dict': model.state_dict(),
'optim_Dict': optimizer.state_dict()},
is_best = is_best, checkdir = args.exp_dir)
print("\n\nEpoch {}/{}...".format(epoch+1, args.epochs))
print('[Train] loss: {0:.3f} | acc: {1:.2f}% | f1: {2:.2f}% | prec: {3:.2f}% | rec: {4:.2f}%'.format(
train_scores['loss'], train_scores['acc']*100, train_scores['f1']*100, train_scores['prec']*100, train_scores['rec']*100))
print('[Valid] loss: {0:.3f} | acc: {1:.2f}% | f1: {2:.2f}% | prec: {3:.2f}% | rec: {4:.2f}%\n'.format(
valid_scores['loss'], valid_scores['acc']*100, valid_scores['f1']*100, valid_scores['prec']*100, valid_scores['rec']*100))
# Early stopping
# if valid_scores['loss']-min_valid_loss > 0: # args.stop_c1) and (max_valid_f1-valid_scores['f1'] > args.stop_c2):
# n_worse += 1
# if n_worse == 5: # args.stop_p:
# print("Early stopping")
# break
# Write performance and args to json
prfs_name = os.path.basename(args.exp_dir)+'_prfs.json'
prfs_path = os.path.join(args.exp_dir, prfs_name)
output_dict['idx2tag'] = idx2tag
with open(prfs_path, 'w') as fout:
json.dump(output_dict, fout, indent=4)
#%% Evaluation on valid/test set (classification report)
from seqeval.metrics import classification_report
from torchcrf import CRF
# crf = CRF(13, batch_first=True)
def cls_report(data_loader, pth_path, add_crf=True, device=torch.device('cpu')):
    # Load checkpoint
checkpoint = torch.load(pth_path, map_location=device)
state_dict = checkpoint['state_dict']
model.load_state_dict(state_dict, strict=False)
model.cpu()
model.eval()
epoch_preds, epoch_trues = [], []
for j, batch in enumerate(data_loader):
input_ids = batch[0].to(device) # [batch_size, seq_len]
attn_mask = batch[1].to(device) # [batch_size, seq_len]
tags = batch[2].to(device) # [batch_size, seq_len]
true_lens = batch[3] # [batch_size]
word_ids = batch[4].to(device) # [batch_size, seq_len]
# print(true_lens)
if add_crf == True:
preds_cut, probs_cut, mask_cut, log_likelihood = model(input_ids, attention_mask = attn_mask, labels = tags)
# preds_cut = model.crf.decode(probs_cut, mask=mask_cut)
for sin_preds, sin_tags, sin_lens, sin_wids in zip(preds_cut, tags, true_lens, word_ids):
sin_wids = sin_wids[1:sin_lens+1]
sin_tags = sin_tags[1:sin_lens+1] # list of lists (1st/last tag is -100 so need to move one step)
pre_wid = None
sin_preds_new, sin_tags_new = [], []
for p, t, wid in zip(sin_preds, sin_tags, sin_wids):
if wid != pre_wid:
sin_preds_new.append(p)
sin_tags_new.append(t.tolist())
pre_wid = wid
epoch_preds.append(sin_preds_new) # list of lists
epoch_trues.append(sin_tags_new)
else:
outputs = model(input_ids, attention_mask = attn_mask, labels = tags)
logits = outputs[1] # [batch_size, seq_len, num_tags]
preds = torch.argmax(logits, dim=2) # [batch_size, seq_len]
for sin_preds, sin_tags, sin_lens, sin_wids in zip(preds, tags, true_lens, word_ids):
# list of lists (1st/last tag is -100 so need to move one step)
sin_wids = sin_wids[1:sin_lens+1]
sin_tags = sin_tags[1:sin_lens+1]
sin_preds = sin_preds[1:sin_lens+1]
pre_wid = None
sin_preds_new, sin_tags_new = [], []
for p, t, wid in zip(sin_preds, sin_tags, sin_wids):
if wid != pre_wid:
sin_preds_new.append(p.tolist())
sin_tags_new.append(t.tolist())
pre_wid = wid
epoch_preds.append(sin_preds_new) # list of lists
epoch_trues.append(sin_tags_new)
# Convert epoch_idxs to epoch_tags
epoch_tag_preds = bert_utils.epoch_idx2tag(epoch_preds, idx2tag)
epoch_tag_trues = bert_utils.epoch_idx2tag(epoch_trues, idx2tag)
print(classification_report(epoch_tag_trues, epoch_tag_preds, output_dict=False, digits=4))
pth_path = os.path.join(args.exp_dir, 'best.pth.tar')
cls_report(valid_loader, pth_path, add_crf=True)
cls_report(test_loader, pth_path, add_crf=True)
pth_path = os.path.join(args.exp_dir, 'last.pth.tar')
cls_report(valid_loader, pth_path, add_crf=True)
cls_report(test_loader, pth_path, add_crf=True)
| 43.813869
| 142
| 0.68005
|
ad969a195cb9898c5febea88a0c73a7f0a3094fa
| 6,694
|
py
|
Python
|
databuilder/extractor/dashboard/tableau/tableau_external_table_extractor.py
|
keyko-io/nevermined-amundsen-databuilder
|
e48f803320e5ce16168c9ed01adee3ea4f7d51cf
|
[
"Apache-2.0"
] | null | null | null |
databuilder/extractor/dashboard/tableau/tableau_external_table_extractor.py
|
keyko-io/nevermined-amundsen-databuilder
|
e48f803320e5ce16168c9ed01adee3ea4f7d51cf
|
[
"Apache-2.0"
] | null | null | null |
databuilder/extractor/dashboard/tableau/tableau_external_table_extractor.py
|
keyko-io/nevermined-amundsen-databuilder
|
e48f803320e5ce16168c9ed01adee3ea4f7d51cf
|
[
"Apache-2.0"
] | null | null | null |
import logging
from typing import Any, Dict, Iterator
from pyhocon import ConfigFactory, ConfigTree
import databuilder.extractor.dashboard.tableau.tableau_dashboard_constants as const
from databuilder import Scoped
from databuilder.extractor.base_extractor import Extractor
from databuilder.extractor.dashboard.tableau.tableau_dashboard_utils import TableauGraphQLApiExtractor,\
TableauDashboardUtils
from databuilder.transformer.base_transformer import ChainedTransformer
from databuilder.transformer.dict_to_model import DictToModel, MODEL_CLASS
LOGGER = logging.getLogger(__name__)
class TableauGraphQLExternalTableExtractor(TableauGraphQLApiExtractor):
"""
    Implements the extraction-time logic for parsing the GraphQL result and transforming it into a dict
that fills the TableMetadata model.
"""
EXTERNAL_CLUSTER_NAME = const.EXTERNAL_CLUSTER_NAME
EXTERNAL_SCHEMA_NAME = const.EXTERNAL_SCHEMA_NAME
def execute(self) -> Iterator[Dict[str, Any]]:
response = self.execute_query()
for table in response['databases']:
if table['connectionType'] in ['google-sheets', 'salesforce', 'excel-direct']:
for downstreamTable in table['tables']:
data = {
'cluster': self._conf.get_string(TableauGraphQLExternalTableExtractor.EXTERNAL_CLUSTER_NAME),
'database': TableauDashboardUtils.sanitize_database_name(
table['connectionType']
),
'schema': TableauDashboardUtils.sanitize_schema_name(table['name']),
'name': TableauDashboardUtils.sanitize_table_name(downstreamTable['name']),
'description': table['description']
}
yield data
else:
data = {
'cluster': self._conf.get_string(TableauGraphQLExternalTableExtractor.EXTERNAL_CLUSTER_NAME),
'database': TableauDashboardUtils.sanitize_database_name(table['connectionType']),
'schema': self._conf.get_string(TableauGraphQLExternalTableExtractor.EXTERNAL_SCHEMA_NAME),
'name': TableauDashboardUtils.sanitize_table_name(table['name']),
'description': table['description']
}
yield data
class TableauDashboardExternalTableExtractor(Extractor):
"""
Creates the "external" Tableau tables.
In this context, "external" tables are "tables" that are not from a typical database, and are loaded
using some other data format, like CSV files.
This extractor has been tested with the following types of external tables:
Excel spreadsheets
Text files (including CSV files)
Salesforce connections
Google Sheets connections
Excel spreadsheets, Salesforce connections, and Google Sheets connections are all classified as
"databases" in terms of Tableau's Metadata API, with their "subsheets" forming their "tables" when
    present. However, these tables are not assigned a schema; this extractor uses the name of the
    parent sheet as the schema and assigns a new table to each subsheet. The connection type is
    always used as the database, and for text files, the schema is set using the EXTERNAL_SCHEMA_NAME
    config option. Since these external tables are usually named for human consumption only and often
    contain a wider range of characters, all inputs are transformed to remove any problematic
    occurrences before they are inserted: see the sanitize methods in TableauDashboardUtils for specifics.
A more concrete example: if one had a Google Sheet titled "Growth by Region & County" with 2 subsheets called
"FY19 Report" and "FY20 Report", two tables would be generated with the following keys:
googlesheets://external.growth_by_region_county/FY_19_Report
googlesheets://external.growth_by_region_county/FY_20_Report
"""
API_VERSION = const.API_VERSION
CLUSTER = const.CLUSTER
EXCLUDED_PROJECTS = const.EXCLUDED_PROJECTS
EXTERNAL_CLUSTER_NAME = const.EXTERNAL_CLUSTER_NAME
EXTERNAL_SCHEMA_NAME = const.EXTERNAL_SCHEMA_NAME
EXTERNAL_TABLE_TYPES = const.EXTERNAL_TABLE_TYPES
SITE_NAME = const.SITE_NAME
TABLEAU_HOST = const.TABLEAU_HOST
TABLEAU_ACCESS_TOKEN_NAME = const.TABLEAU_ACCESS_TOKEN_NAME
TABLEAU_ACCESS_TOKEN_SECRET = const.TABLEAU_ACCESS_TOKEN_SECRET
VERIFY_REQUEST = const.VERIFY_REQUEST
def init(self, conf: ConfigTree) -> None:
self._conf = conf
self.query = """query externalTables($externalTableTypes: [String]) {
databases (filter: {connectionTypeWithin: $externalTableTypes}) {
name
connectionType
description
tables {
name
}
}
}"""
self.query_variables = {
'externalTableTypes': self._conf.get_list(TableauDashboardExternalTableExtractor.EXTERNAL_TABLE_TYPES)}
self._extractor = self._build_extractor()
transformers = []
dict_to_model_transformer = DictToModel()
dict_to_model_transformer.init(
conf=Scoped.get_scoped_conf(self._conf, dict_to_model_transformer.get_scope()).with_fallback(
ConfigFactory.from_dict(
{MODEL_CLASS: 'databuilder.models.table_metadata.TableMetadata'})))
transformers.append(dict_to_model_transformer)
self._transformer = ChainedTransformer(transformers=transformers)
def extract(self) -> Any:
record = self._extractor.extract()
if not record:
return None
return self._transformer.transform(record=record)
def get_scope(self) -> str:
return 'extractor.tableau_external_table'
def _build_extractor(self) -> TableauGraphQLExternalTableExtractor:
"""
Builds a TableauGraphQLExternalTableExtractor. All data required can be retrieved with a single GraphQL call.
:return: A TableauGraphQLExternalTableExtractor that creates external table metadata entities.
"""
extractor = TableauGraphQLExternalTableExtractor()
config_dict = {
TableauGraphQLApiExtractor.QUERY_VARIABLES: self.query_variables,
TableauGraphQLApiExtractor.QUERY: self.query}
tableau_extractor_conf = \
Scoped.get_scoped_conf(self._conf, extractor.get_scope())\
.with_fallback(self._conf)\
.with_fallback(ConfigFactory.from_dict(config_dict))
extractor.init(conf=tableau_extractor_conf)
return extractor
| 47.140845
| 117
| 0.699731
|
c5df30f2fb164861ba55826e16e6b7e25a6324fb
| 3,723
|
py
|
Python
|
modelchimp/views/api/experiment_custom_object.py
|
akarsh3007/modelchimp
|
1b4a53547d6835867df0d4ca7be83ea6c805c8ce
|
[
"BSD-2-Clause"
] | null | null | null |
modelchimp/views/api/experiment_custom_object.py
|
akarsh3007/modelchimp
|
1b4a53547d6835867df0d4ca7be83ea6c805c8ce
|
[
"BSD-2-Clause"
] | null | null | null |
modelchimp/views/api/experiment_custom_object.py
|
akarsh3007/modelchimp
|
1b4a53547d6835867df0d4ca7be83ea6c805c8ce
|
[
"BSD-2-Clause"
] | null | null | null |
import re
from django.http import HttpResponse
from rest_framework import status
from rest_framework import generics, mixins, views
from rest_framework.response import Response
from modelchimp.serializers.experiment_custom_object import ExperimentCustomObjectSerializer
from modelchimp.models.experiment_custom_object import ExperimentCustomObject
from modelchimp.models.membership import Membership
from modelchimp.models.machinelearning_model import MachineLearningModel
from modelchimp.api_permissions import HasProjectMembership
from rest_framework.permissions import IsAuthenticated
class ExperimentCustomObjectAPI(mixins.RetrieveModelMixin,
mixins.CreateModelMixin,
generics.GenericAPIView):
serializer_class = ExperimentCustomObjectSerializer
queryset = ExperimentCustomObject.objects.all()
permission_classes = (IsAuthenticated, HasProjectMembership)
def retrieve(self, project_id):
try:
# Get the filter parameters from the request
params = self.request.query_params
custom_object_id = None
            # Check whether a custom object id was supplied
if 'custom-object-id' in params:
custom_object_id = params['custom-object-id']
if not self._is_uuid4_pattern(custom_object_id):
raise TypeError('custom_object_id is of wrong type')
except Exception as e:
return Response("Error: %s" % e, status=status.HTTP_400_BAD_REQUEST)
custom_object_instance = self.get_queryset().get(custom_object_id=custom_object_id)
custom_object_pointer = custom_object_instance.custom_object_file
response = HttpResponse(custom_object_pointer,content_type='application/gzip')
response['Content-Disposition'] = 'attachment; filename=NameOfFile'
return response
def create(self, request, project_id):
project_id = request.data.get('project_id')
user = self.request.user
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(status=status.HTTP_201_CREATED)
return Response(status=status.HTTP_400_BAD_REQUEST)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
def get(self, request, project_id, *args, **kwargs):
return self.retrieve(project_id, *args, **kwargs)
def _is_uuid4_pattern(self, text):
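        # Matches canonical UUID4 strings, e.g. '9f1b6c2e-8d3a-4f7b-9c1d-0a2b3c4d5e6f'
        # (third group starts with 4, fourth group starts with 8, 9, a or b).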
pattern = re.compile(
(
'[a-f0-9]{8}-' +
'[a-f0-9]{4}-' +
'4' + '[a-f0-9]{3}-' +
'[89ab][a-f0-9]{3}-' +
'[a-f0-9]{12}$'
),
re.IGNORECASE
)
return pattern.match(text)
class ExperimentCustomObjectDetailAPI(mixins.RetrieveModelMixin, generics.ListAPIView):
serializer_class = ExperimentCustomObjectSerializer
queryset = ExperimentCustomObject.objects.all()
permission_classes = (IsAuthenticated, HasProjectMembership)
def list(self, request, model_id):
try:
ml_model_obj = MachineLearningModel.objects.get(id=model_id)
except Exception as e:
return Response("Error: %s" % e, status=status.HTTP_400_BAD_REQUEST)
access = Membership.objects.filter(user=self.request.user, project=ml_model_obj.project).exists()
if not access:
return Response(status=status.HTTP_403_FORBIDDEN)
queryset = self.get_queryset().filter(ml_model_id=model_id)
serializer = self.serializer_class(queryset, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
| 37.606061
| 105
| 0.681708
|
6e84941d9b1ac1dfd237fb0f091c1e0e1d0c27d6
| 16,916
|
py
|
Python
|
src/dcos_e2e/_vendor/dcos_launch/platforms/arm.py
|
Fabs/dcos-e2e
|
8836dfa5b83f9d61e92b8b4bd8b058404a3bdc20
|
[
"Apache-2.0"
] | null | null | null |
src/dcos_e2e/_vendor/dcos_launch/platforms/arm.py
|
Fabs/dcos-e2e
|
8836dfa5b83f9d61e92b8b4bd8b058404a3bdc20
|
[
"Apache-2.0"
] | null | null | null |
src/dcos_e2e/_vendor/dcos_launch/platforms/arm.py
|
Fabs/dcos-e2e
|
8836dfa5b83f9d61e92b8b4bd8b058404a3bdc20
|
[
"Apache-2.0"
] | null | null | null |
""" This module is intended to allow deploying of arbitrary Azure Resource
Manager (ARM) templates and describe the provided DC/OS cluster. For more
information on how to configure and interpret these templates, see:
https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-authoring-templates
"""
import contextlib
import copy
import logging
import re
import requests
import retrying
from azure.common.credentials import ServicePrincipalCredentials
from azure.common.exceptions import CloudError
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource.resources.v2016_02_01 import ResourceManagementClient
from azure.mgmt.resource.resources.v2016_02_01.models import (DeploymentMode,
DeploymentProperties,
ResourceGroup)
from azure.monitor import MonitorClient
from ...dcos_launch.util import DeploymentError
from ...dcos_test_utils.helpers import Host
log = logging.getLogger(__name__)
# This interface is designed to only use a single deployment.
# Since the azure interface is based around resource groups, deriving
# deployment names from group names makes it easier to attach to the deployments created here
DEPLOYMENT_NAME = '{}-Deployment'
def validate_hostname_prefix(prefix):
"""Hostname prefixes in azure templates are used to link a variety of resources
    Not all of these resources will have their constraints checked when ARM
    validation occurs. This check in particular was aggravating, as no docs surfaced
    this issue, so logs needed to be scanned just to discover this error
"""
assert re.match('^[a-z][a-z0-9-]{1,61}[a-z0-9]$', prefix), 'Invalid DNS prefix: {}'.format(prefix)
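    # Illustrative examples: 'dcos-test01' passes, while 'DCOS_test' (uppercase and an
    # underscore) or a prefix longer than 63 characters would fail the check above.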
def check_json_object(obj):
""" Simple check to fill in the map for automatic parameter casting
JSON objects must be represented as dict at this level
"""
assert isinstance(obj, dict), 'Invalid JSON object: {}'.format(obj)
return obj
def check_array(arr):
""" Simple check to fill in the map for automatic parameter casting
JSON arrays must be represented as lists at this level
"""
assert isinstance(arr, list), 'Invalid array: {}'.format(arr)
return arr
def nic_to_host(nic, public_ip=None):
assert len(nic.ip_configurations) == 1
ip_config = nic.ip_configurations[0]
if ip_config.public_ip_address is None and public_ip is None:
return Host(ip_config.private_ip_address, None)
if public_ip is None:
return Host(ip_config.private_ip_address, ip_config.public_ip_address.ip_address)
return Host(ip_config.private_ip_address, public_ip)
class AzureWrapper:
def __init__(self, location: str, subscription_id: str, client_id: str, client_secret: str, tenant_id: str):
self.credentials = ServicePrincipalCredentials(
client_id=client_id,
secret=client_secret,
tenant=tenant_id)
self.rmc = ResourceManagementClient(self.credentials, subscription_id)
self.nmc = NetworkManagementClient(self.credentials, subscription_id)
self.mc = MonitorClient(self.credentials, subscription_id)
# location is included to keep a similar model as dcos_launch.platforms.aws.BotoWrapper
self.location = location
def deploy_template_to_new_resource_group(
self, template_url, group_name, parameters, tags=None, template=None):
if tags is None:
tags = dict()
log.info('Checking deployment parameters vs template before starting...')
deployment_properties = self.create_deployment_properties(
template_url, parameters, template=template)
deployment_name = DEPLOYMENT_NAME.format(group_name)
# Resource group must be created before validation can occur
if self.rmc.resource_groups.check_existence(group_name):
raise Exception("Group name already exists / taken: {}".format(group_name))
        log.info('Starting resource group creation')
def get_all_details(error):
formatted_message = '{}: {}\n\n'.format(error.code, error.message)
if error.details is None:
return formatted_message
for d in error.details:
formatted_message += get_all_details(d)
return formatted_message
with contextlib.ExitStack() as stack:
self.rmc.resource_groups.create_or_update(
group_name,
ResourceGroup(location=self.location, tags=tags))
# Ensure the resource group will be deleted if the following steps fail
stack.callback(self.rmc.resource_groups.delete, group_name)
log.info('Resource group created: {}'.format(group_name))
log.info('Checking with Azure to validate template deployment')
result = self.rmc.deployments.validate(
group_name, deployment_name, properties=deployment_properties)
if result.error:
raise Exception("Template verification failed!\n{}".format(get_all_details(result.error)))
log.info('Template successfully validated')
log.info('Starting template deployment')
self.rmc.deployments.create_or_update(
group_name, deployment_name, deployment_properties, raw=True)
stack.pop_all()
log.info('Successfully started template deployment')
def create_deployment_properties(self, template_url, parameters, template: dict=None):
""" Pulls the targeted template, checks parameter specs and casts
user provided parameters to the appropriate type. Assertion is raised
if there are unused parameters or invalid casting
"""
user_parameters = copy.deepcopy(parameters)
type_cast_map = {
'string': str,
'securestring': str,
'int': int,
'bool': bool,
'object': check_json_object,
'secureObject': check_json_object,
'array': check_array}
log.debug('Pulling Azure template for parameter validation...')
if template is None:
r = requests.get(template_url)
r.raise_for_status()
template = r.json()
if 'parameters' not in template:
assert user_parameters is None, 'This template does not support parameters, ' \
'yet parameters were supplied: {}'.format(user_parameters)
log.debug('Constructing DeploymentProperties from user parameters: {}'.format(parameters))
template_parameters = {}
for k, v in template['parameters'].items():
if k in user_parameters:
# All templates parameters are required to have a type field.
# Azure requires that parameters be provided as {key: {'value': value}}.
template_parameters[k] = {
'value': type_cast_map[v['type']](user_parameters.pop(k))}
log.debug('Final template parameters: {}'.format(template_parameters))
if len(user_parameters) > 0:
raise Exception('Unrecognized template parameters were supplied: {}'.format(user_parameters))
return DeploymentProperties(
template=template,
mode=DeploymentMode.incremental,
parameters=template_parameters)
class DcosAzureResourceGroup:
""" An abstraction for cleanly handling the life cycle of a DC/OS template
deployment. Operations include: create, wait, describe host IPs, and delete
"""
def __init__(self, group_name, azure_wrapper):
self.group_name = group_name
self.azure_wrapper = azure_wrapper
@classmethod
def deploy_acs_template(
cls, azure_wrapper: AzureWrapper, template_url: str, group_name: str,
public_key, master_prefix, agent_prefix, admin_name, oauth_enabled,
vm_size, agent_count, name_suffix, vm_diagnostics_enabled):
""" Creates a new resource group and deploys a ACS DC/OS template to it
using a subset of parameters for a simple deployment. To see a full
listing of parameters, including description and formatting, go to:
gen/azure/templates/acs.json in this repository.
Args:
azure_wrapper: see above
template_url: Azure-accessible location for the desired ACS template
group_name: name used for the new resource group that will be created
for this template deployment
Args that wrap template parameters:
public_key -> sshRSAPublicKey
master_prefix -> masterEndpointDNSNamePrefix
agent_prefix -> agentEndpointDNSNamePrefix
admin_name -> linuxAdminUsername
vm_size -> agentVMSize
agent_count -> agentCount
name_suffix -> nameSuffix
oauth_enabled -> oauthEnabled
vm_diagnostics_enabled -> enableVMDiagnostics
"""
        assert master_prefix != agent_prefix, 'Masters and agents must have unique prefixes'
validate_hostname_prefix(master_prefix)
validate_hostname_prefix(agent_prefix)
parameters = {
'sshRSAPublicKey': public_key,
'masterEndpointDNSNamePrefix': master_prefix,
'agentEndpointDNSNamePrefix': agent_prefix,
'linuxAdminUsername': admin_name,
'agentVMSize': vm_size,
'agentCount': agent_count,
'nameSuffix': name_suffix,
'oauthEnabled': oauth_enabled,
'enableVMDiagnostics': vm_diagnostics_enabled}
azure_wrapper.deploy_template_to_new_resource_group(template_url, group_name, parameters)
return cls(group_name, azure_wrapper)
def get_deployment_state(self):
return self.azure_wrapper.rmc.deployments.get(
self.group_name, DEPLOYMENT_NAME.format(self.group_name)).properties.provisioning_state
def wait_for_deployment(self, timeout=60 * 60):
"""
        Azure will not register a template instantly after deployment, so
        CloudError must be expected and retried. Once the ops are retrieved, this
        loops through all operations in the group's only deployment:
        if any operations are still in progress, this function sleeps;
        once all operations are complete, any failures are printed
        to the log stream.
"""
log.info('Waiting for deployment to finish')
def azure_failure_report():
deploy_ops = self.azure_wrapper.rmc.deployment_operations.list(
self.group_name, DEPLOYMENT_NAME.format(self.group_name))
failures = [(op.properties.status_code, op.properties.status_message) for op
in deploy_ops if op.properties.provisioning_state == 'Failed']
for failure in failures:
log.error('Deployment operation failed! {}: {}'.format(*failure))
@retrying.retry(
wait_fixed=60 * 1000, stop_max_delay=timeout * 1000,
retry_on_result=lambda res: res is False,
retry_on_exception=lambda ex: isinstance(ex, CloudError))
def check_deployment_operations():
deploy_state = self.get_deployment_state()
if deploy_state == 'Succeeded':
return True
elif deploy_state == 'Failed':
log.info('Deployment failed. Checking deployment operations.')
azure_failure_report()
raise DeploymentError('Azure Deployment Failed!')
else:
log.info('Waiting for deployment. Current state: {}. It should either be Succeeded/Failed.'.format(
deploy_state))
return False
try:
check_deployment_operations()
except retrying.RetryError:
log.info('Deployment failed. Checking deployment operations.')
azure_failure_report()
raise DeploymentError("Azure Deployment Failed!")
def list_resources(self, filter_string):
yield from self.azure_wrapper.rmc.resource_groups.list_resources(
self.group_name, filter=(filter_string))
def get_scale_set_nics(self, name_substring=None):
for resource in self.list_resources("resourceType eq 'Microsoft.Compute/virtualMachineScaleSets'"):
if name_substring and name_substring not in resource.name:
continue
yield from self.azure_wrapper.nmc.network_interfaces.list_virtual_machine_scale_set_network_interfaces(
self.group_name, resource.name)
def get_public_ip_address(self, name_substring=None):
for resource in self.list_resources("resourceType eq 'Microsoft.Network/publicIPAddresses'"):
if name_substring and name_substring not in resource.name:
continue
return self.azure_wrapper.nmc.public_ip_addresses.get(self.group_name, resource.name)
@property
def public_agent_lb_fqdn(self):
return self.get_public_ip_address('agent-ip').dns_settings.fqdn
@property
def public_master_lb_fqdn(self):
return self.get_public_ip_address('master-ip').dns_settings.fqdn
@property
def master_nics(self):
""" The only instances of networkInterface Resources are for masters
"""
for resource in self.list_resources("resourceType eq 'Microsoft.Network/networkInterfaces'"):
assert 'master' in resource.name, 'Expected to only find master NICs, not: {}'.format(resource.name)
yield self.azure_wrapper.nmc.network_interfaces.get(self.group_name, resource.name)
def get_master_ips(self):
""" Traffic from abroad is routed to a master wth the public master
loadbalancer FQDN and the VM index plus 2200 (so the first master will be at 2200)
"""
public_lb_ip = self.public_master_lb_fqdn
return [Host(nic_to_host(nic).private_ip, '{}:{}'.format(public_lb_ip, 2200 + int(nic.name[-1])))
for nic in self.master_nics]
def get_private_agent_ips(self):
return [nic_to_host(nic) for nic in self.get_scale_set_nics('private')]
def get_public_agent_ips(self):
""" public traffic is routed to public agents via a specific load balancer """
public_lb_ip = self.public_agent_lb_fqdn
return [Host(nic_to_host(nic).private_ip, public_lb_ip)
for nic in self.get_scale_set_nics('public')]
def update_tags(self, new_tags: dict):
rg = self.azure_wrapper.rmc.resource_groups.get(self.group_name)
if rg.tags is None:
rg.tags = dict()
rg.tags.update(new_tags)
self.azure_wrapper.rmc.resource_groups.patch(rg.name, {
'tags': rg.tags,
'location': rg.location}, raw=True)
def delete(self):
log.info('Triggering delete')
self.azure_wrapper.rmc.resource_groups.delete(self.group_name, raw=True)
def __enter__(self):
return self
def __exit__(self, exc_type, exc, exc_tb):
self.delete()
class HybridDcosAzureResourceGroup(DcosAzureResourceGroup):
@property
def master_nics(self):
for resource in self.list_resources("resourceType eq 'Microsoft.Network/networkInterfaces'"):
if 'bootstrap' in resource.name:
# Skipping the bootstrap NICs
continue
assert 'master' in resource.name, 'Expected to find master NICs, not: {}'.format(resource.name)
yield self.azure_wrapper.nmc.network_interfaces.get(self.group_name, resource.name)
def get_master_ips(self):
public_lb_ip = self.public_master_lb_fqdn
return [nic_to_host(nic, public_lb_ip) for nic in self.master_nics]
def get_linux_private_agent_ips(self):
return [nic_to_host(nic) for nic in self.get_scale_set_nics('linpri')]
def get_linux_public_agent_ips(self):
return [nic_to_host(nic, self.linux_public_agent_lb_fqdn)
for nic in self.get_scale_set_nics('linpub')]
def get_windows_public_agent_ips(self):
# this VMSS name is derived from this being the 0-th element in the VMSS list
return [nic_to_host(nic, self.windows_public_agent_lb_fqdn)
for nic in self.get_scale_set_nics('900-vmss')]
def get_windows_private_agent_ips(self):
# this VMSS name is derived from this being the 1-th element in the VMSS list
return [nic_to_host(nic) for nic in self.get_scale_set_nics('901-vmss')]
@property
def linux_public_agent_lb_fqdn(self):
return self.get_public_ip_address('agent-ip-linpub').dns_settings.fqdn
@property
def windows_public_agent_lb_fqdn(self):
return self.get_public_ip_address('agent-ip-wpub').dns_settings.fqdn
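# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal deploy/wait/describe/teardown cycle, assuming valid service-principal
# credentials and a reachable ACS template URL. All literal values below
# (IDs, URLs, names, sizes) are placeholders, not values from this repository.
def _example_deploy_and_teardown():
    wrapper = AzureWrapper(
        location='East US',
        subscription_id='<subscription-id>',
        client_id='<client-id>',
        client_secret='<client-secret>',
        tenant_id='<tenant-id>')
    with DcosAzureResourceGroup.deploy_acs_template(
            azure_wrapper=wrapper,
            template_url='https://example.com/acs.json',
            group_name='dcos-example-group',
            public_key='ssh-rsa AAAA... user@host',
            master_prefix='exmaster',
            agent_prefix='exagent',
            admin_name='core',
            oauth_enabled='false',
            vm_size='Standard_D2_v2',
            agent_count=2,
            name_suffix='12345',
            vm_diagnostics_enabled='false') as group:
        # Blocks until Azure reports Succeeded/Failed, then the cluster can be described.
        group.wait_for_deployment()
        print(group.get_master_ips())
        print(group.get_private_agent_ips())
    # Leaving the `with` block triggers DcosAzureResourceGroup.delete().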
avg_line_length: 45.718919 | max_line_length: 115 | alphanum_fraction: 0.676578

hexsha: 7f59884addfe7603506144ca1c3ca7ae9ce3d373 | size: 625 | ext: py | lang: Python
repo_path: cookie_decrypter.py | repo_name: 6un9-h0-Dan/PoshC2 | repo_head_hexsha: dd39e62421ed5efd1e895e66b8ad59f034bf6a4c | licenses: ["BSD-3-Clause"]
max_stars_count: 1,504 (2016-07-12T04:14:00.000Z to 2022-03-31T02:59:30.000Z) | max_issues_count: 139 (2016-10-13T10:41:18.000Z to 2022-03-31T13:22:47.000Z) | max_forks_count: 377 (2016-07-12T03:10:03.000Z to 2022-03-31T10:04:13.000Z)
#!/usr/bin/env python3
from poshc2.Colours import Colours
from poshc2.server.Core import decrypt
from poshc2.server.database.DB import get_keys, database_connect
import sys, re
file = open(sys.argv[1], "r")
database_connect()
result = get_keys()
if result:
for line in file:
if re.search("SessionID", line):
for i in result:
try:
value = decrypt(i[0], line.split('=')[1])
print(Colours.GREEN + "Success with Key %s - %s" % (i[0], value))
except Exception:
print(Colours.RED + "Failed with Key %s" % i[0])
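# Illustrative usage note (added for clarity; not part of the original script): it
# expects a file of captured cookies as its only argument and a configured PoshC2
# database, e.g.
#   python3 cookie_decrypter.py captured_cookies.txt
# Every line containing "SessionID" is tried against each key stored in the database.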
avg_line_length: 28.409091 | max_line_length: 85 | alphanum_fraction: 0.5792

hexsha: ea04ad1a61f11b3cc74e5469b8a6197b5ad36a85 | size: 12,009 | ext: py | lang: Python
repo_path: tests/test_expectations.py | repo_name: codelover-without-talent/GPflow | repo_head_hexsha: 1af7b1ca7da6687974150a1440d821a106b2159d | licenses: ["Apache-2.0"]
max_stars_count: 1 (2018-10-20T03:02:50.000Z to 2018-10-20T03:02:50.000Z) | max_issues_count: null | max_forks_count: 1 (2019-01-12T23:35:26.000Z to 2019-01-12T23:35:26.000Z)
# Copyright 2018 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import copy
import pytest
import gpflow
from gpflow.expectations import expectation, quadrature_expectation
from gpflow.probability_distributions import Gaussian, DiagonalGaussian, MarkovGaussian
from gpflow import kernels, mean_functions, features
from gpflow.test_util import session_tf
from gpflow.test_util import cache_tensor
from numpy.testing import assert_allclose
rng = np.random.RandomState(1)
RTOL = 1e-6
class Data:
num_data = 5
num_ind = 4
D_in = 2
D_out = 2
Xmu = rng.randn(num_data, D_in)
Xmu_markov = rng.randn(num_data + 1, D_in) # (N+1)xD
Xcov = rng.randn(num_data, D_in, D_in)
Xcov = Xcov @ np.transpose(Xcov, (0, 2, 1))
Z = rng.randn(num_ind, D_in)
Z2 = rng.randn(num_ind - 1, D_in)
cov_params = rng.randn(num_data + 1, D_in, 2 * D_in) / 2. # (N+1)xDx2D
NN_cov = cov_params @ np.transpose(cov_params, (0, 2, 1)) # (N+1)xDxD
NNplus1_cross = cov_params[:-1] @ np.transpose(cov_params[1:], (0, 2, 1)) # NxDxD
NNplus1_cross = np.concatenate((NNplus1_cross, np.zeros((1, D_in, D_in))), 0) # (N+1)xDxD
Xcov_markov = np.stack([NN_cov, NNplus1_cross]) # 2x(N+1)xDxD
@pytest.fixture
def feature():
return features.InducingPoints(Data.Z)
@cache_tensor
def feature2():
return features.InducingPoints(Data.Z2)
@cache_tensor
def gauss():
return Gaussian(
tf.convert_to_tensor(Data.Xmu),
tf.convert_to_tensor(Data.Xcov))
@cache_tensor
def dirac_gauss():
return Gaussian(
tf.convert_to_tensor(Data.Xmu),
tf.convert_to_tensor(np.zeros((Data.num_data, Data.D_in, Data.D_in))))
@cache_tensor
def gauss_diag():
return DiagonalGaussian(
tf.convert_to_tensor(Data.Xmu),
tf.convert_to_tensor(rng.rand(Data.num_data, Data.D_in)))
@cache_tensor
def dirac_diag():
return DiagonalGaussian(
tf.convert_to_tensor(Data.Xmu),
tf.convert_to_tensor(np.zeros((Data.num_data, Data.D_in))))
@cache_tensor
def markov_gauss():
return MarkovGaussian(
tf.convert_to_tensor(Data.Xmu_markov),
tf.convert_to_tensor(Data.Xcov_markov))
@cache_tensor
def dirac_markov_gauss():
return MarkovGaussian(
tf.convert_to_tensor(Data.Xmu_markov),
tf.convert_to_tensor(np.zeros((2, Data.num_data + 1, Data.D_in, Data.D_in))))
@cache_tensor
def rbf_kern():
return kernels.RBF(Data.D_in, variance=rng.rand(), lengthscales=rng.rand() + 1.)
@cache_tensor
def rbf_kern_2():
# Additional cached rbf kernel for rbf cross covariance tests
return kernels.RBF(Data.D_in, variance=rng.rand(), lengthscales=rng.rand() + 1.)
@cache_tensor
def lin_kern():
return kernels.Linear(Data.D_in, variance=rng.rand())
@cache_tensor
def matern_kern():
return kernels.Matern32(Data.D_in, variance=rng.rand())
@cache_tensor
def rbf_lin_sum_kern():
return kernels.Sum([
kernels.RBF(Data.D_in, variance=rng.rand(), lengthscales=rng.rand() + 1.),
kernels.Linear(Data.D_in, variance=rng.rand())
])
@cache_tensor
def rbf_lin_sum_kern2():
return kernels.Sum([
kernels.Linear(Data.D_in, variance=rng.rand()),
kernels.RBF(Data.D_in, variance=rng.rand(), lengthscales=rng.rand() + 1.),
kernels.Linear(Data.D_in, variance=rng.rand()),
kernels.RBF(Data.D_in, variance=rng.rand(), lengthscales=rng.rand() + 1.),
])
@cache_tensor
def rbf_lin_prod_kern():
return kernels.Product([
kernels.RBF(1, variance=rng.rand(), lengthscales=rng.rand() + 1., active_dims=[0]),
kernels.Linear(1, variance=rng.rand(), active_dims=[1])
])
@cache_tensor
def rbf_kern_act_dim_0():
return kernels.RBF(1, variance=rng.rand(), lengthscales=rng.rand() + 1., active_dims=[0])
@cache_tensor
def rbf_kern_act_dim_1():
return kernels.RBF(1, variance=rng.rand(), lengthscales=rng.rand() + 1., active_dims=[1])
@cache_tensor
def lin_kern_act_dim_0():
return kernels.Linear(1, variance=rng.rand(), active_dims=[0])
@cache_tensor
def lin_kern_act_dim_1():
return kernels.Linear(1, variance=rng.rand(), active_dims=[1])
@cache_tensor
def lin_mean():
return mean_functions.Linear(A=rng.randn(Data.D_in, Data.D_out), b=rng.randn(Data.D_out))
@cache_tensor
def identity_mean():
# Note: Identity can only be used if Din == Dout
return mean_functions.Identity(input_dim=Data.D_in)
@cache_tensor
def const_mean():
return mean_functions.Constant(c=rng.randn(Data.D_out))
@cache_tensor
def zero_mean():
return mean_functions.Zero(output_dim=Data.D_out)
def _check(params):
analytic = expectation(*params)
quad = quadrature_expectation(*params)
session = tf.get_default_session()
analytic, quad = session.run([analytic, quad])
assert_allclose(analytic, quad, rtol=RTOL)
# =================================== TESTS ===================================
@pytest.mark.parametrize("distribution", [gauss])
@pytest.mark.parametrize("mean1", [lin_mean, identity_mean, const_mean, zero_mean])
@pytest.mark.parametrize("mean2", [lin_mean, identity_mean, const_mean, zero_mean])
@pytest.mark.parametrize("arg_filter",
[lambda p, m1, m2: (p, m1),
lambda p, m1, m2: (p, m1, m2)])
def test_mean_function_only_expectations(session_tf, distribution, mean1, mean2, arg_filter):
params = arg_filter(distribution(), mean1(), mean2())
_check(params)
@pytest.mark.parametrize("distribution", [gauss, gauss_diag])
@pytest.mark.parametrize("kernel", [lin_kern, rbf_kern, rbf_lin_sum_kern, rbf_lin_prod_kern])
@pytest.mark.parametrize("arg_filter",
[lambda p, k, f: (p, k),
lambda p, k, f: (p, (k, f)),
lambda p, k, f: (p, (k, f), (k, f))])
def test_kernel_only_expectations(session_tf, distribution, kernel, feature, arg_filter):
params = arg_filter(distribution(), kernel(), feature)
_check(params)
@pytest.mark.parametrize("distribution", [gauss])
@pytest.mark.parametrize("kernel", [rbf_kern, lin_kern, matern_kern, rbf_lin_sum_kern])
@pytest.mark.parametrize("mean", [lin_mean, identity_mean, const_mean, zero_mean])
@pytest.mark.parametrize("arg_filter",
[lambda p, k, f, m: (p, (k, f), m),
lambda p, k, f, m: (p, m, (k, f))])
def test_kernel_mean_function_expectations(
session_tf, distribution, kernel, feature, mean, arg_filter):
params = arg_filter(distribution(), kernel(), feature, mean())
_check(params)
@pytest.mark.parametrize("kernel", [lin_kern, rbf_kern, rbf_lin_sum_kern, rbf_lin_prod_kern])
def test_eKdiag_no_uncertainty(session_tf, kernel):
eKdiag = expectation(dirac_diag(), kernel())
Kdiag = kernel().Kdiag(Data.Xmu)
eKdiag, Kdiag = session_tf.run([eKdiag, Kdiag])
assert_allclose(eKdiag, Kdiag, rtol=RTOL)
@pytest.mark.parametrize("kernel", [lin_kern, rbf_kern, rbf_lin_sum_kern, rbf_lin_prod_kern])
def test_eKxz_no_uncertainty(session_tf, kernel, feature):
eKxz = expectation(dirac_diag(), (kernel(), feature))
Kxz = kernel().K(Data.Xmu, Data.Z)
eKxz, Kxz = session_tf.run([eKxz, Kxz])
assert_allclose(eKxz, Kxz, rtol=RTOL)
@pytest.mark.parametrize("kernel", [lin_kern, rbf_kern, rbf_lin_sum_kern])
@pytest.mark.parametrize("mean", [lin_mean, identity_mean, const_mean, zero_mean])
def test_eMxKxz_no_uncertainty(session_tf, kernel, feature, mean):
exKxz = expectation(dirac_diag(), mean(), (kernel(), feature))
Kxz = kernel().K(Data.Xmu, Data.Z)
xKxz = expectation(dirac_gauss(), mean())[:, :, None] * Kxz[:, None, :]
exKxz, xKxz = session_tf.run([exKxz, xKxz])
assert_allclose(exKxz, xKxz, rtol=RTOL)
@pytest.mark.parametrize("kernel", [lin_kern, rbf_kern, rbf_lin_sum_kern, rbf_lin_prod_kern])
def test_eKzxKxz_no_uncertainty(session_tf, kernel, feature):
kern = kernel()
eKzxKxz = expectation(dirac_diag(), (kern, feature), (kern, feature))
Kxz = kern.K(Data.Xmu, Data.Z)
eKzxKxz, Kxz = session_tf.run([eKzxKxz, Kxz])
KzxKxz = Kxz[:, :, None] * Kxz[:, None, :]
assert_allclose(eKzxKxz, KzxKxz, rtol=RTOL)
def test_RBF_eKzxKxz_gradient_not_NaN(session_tf):
"""
Ensure that <K_{Z, x} K_{x, Z}>_p(x) is not NaN and correct, when
K_{Z, Z} is zero with finite precision. See pull request #595.
"""
kern = gpflow.kernels.RBF(1, lengthscales=0.1)
kern.variance = 2.
p = gpflow.probability_distributions.Gaussian(
tf.constant([[10]], dtype=gpflow.settings.tf_float),
tf.constant([[[0.1]]], dtype=gpflow.settings.tf_float))
z = gpflow.features.InducingPoints([[-10.], [10.]])
ekz = expectation(p, (kern, z), (kern, z))
g, = tf.gradients(ekz, kern.lengthscales._unconstrained_tensor)
grad = session_tf.run(g)
assert grad is not None and not np.isnan(grad)
@pytest.mark.parametrize("kernel1", [rbf_kern_act_dim_0, lin_kern_act_dim_0])
@pytest.mark.parametrize("kernel2", [rbf_kern_act_dim_1, lin_kern_act_dim_1])
def test_eKzxKxz_separate_dims_simplification(
session_tf, kernel1, kernel2, feature):
_check((gauss_diag(), (kernel1(), feature), (kernel2(), feature)))
def test_eKzxKxz_different_sum_kernels(session_tf, feature):
kern1, kern2 = rbf_lin_sum_kern(), rbf_lin_sum_kern2()
_check((gauss(), (kern1, feature), (kern2, feature)))
def test_eKzxKxz_same_vs_different_sum_kernels(session_tf, feature):
# check the result is the same if we pass different objects with the same value
kern1 = rbf_lin_sum_kern2()
kern2 = copy.copy(rbf_lin_sum_kern2())
same = expectation(*(gauss(), (kern1, feature), (kern1, feature)))
different = expectation(*(gauss(), (kern1, feature), (kern2, feature)))
session = tf.get_default_session()
same, different = session.run([same, different])
assert_allclose(same, different, rtol=RTOL)
@pytest.mark.parametrize("kernel", [rbf_kern, lin_kern, rbf_lin_sum_kern])
def test_exKxz_markov(session_tf, kernel, feature):
_check((markov_gauss(), (kernel(), feature), identity_mean()))
@pytest.mark.parametrize("kernel", [rbf_kern, lin_kern, rbf_lin_sum_kern])
def test_exKxz_markov_no_uncertainty(session_tf, kernel, feature):
exKxz = expectation(dirac_markov_gauss(), (kernel(), feature), identity_mean())
exKxz = session_tf.run(exKxz)
Kzx = kernel().compute_K(Data.Xmu_markov[:-1, :], Data.Z) # NxM
xKxz = Kzx[..., None] * Data.Xmu_markov[1:, None, :] # NxMxD
assert_allclose(exKxz, xKxz, rtol=RTOL)
@pytest.mark.parametrize("distribution", [gauss, gauss_diag, markov_gauss])
def test_cov_shape_inference(session_tf, distribution, feature):
gauss_tuple = (distribution().mu, distribution().cov)
_check((gauss_tuple, (rbf_kern(), feature)))
if isinstance(distribution(), MarkovGaussian):
_check((gauss_tuple, None, (rbf_kern(), feature)))
@pytest.mark.parametrize("distribution", [gauss, gauss_diag])
@pytest.mark.parametrize("kernel1", [rbf_kern, rbf_kern_2])
@pytest.mark.parametrize("kernel2", [rbf_kern, rbf_kern_2])
@pytest.mark.parametrize("feat1", [feature, feature2])
@pytest.mark.parametrize("feat2", [feature, feature2])
def test_eKzxKxz_rbf_cross_covariance(session_tf,
distribution, kernel1, kernel2,
feat1, feat2):
_check((distribution(), (kernel1(), feat1()), (kernel2(), feat2())))
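# --- Illustrative sketch (added for clarity; not part of the original test suite) ---
# Shows the pattern the parametrized tests exercise: for a Gaussian input density,
# the analytic expectation should match its quadrature estimate. It reuses the
# module's own Data and kernel helpers; evaluating the returned tensors still
# requires a TF session, as provided by the session_tf fixture above.
def _example_manual_check():
    p = gauss()
    kern = rbf_kern()
    feat = features.InducingPoints(Data.Z)
    analytic = expectation(p, (kern, feat))         # eKxz, computed in closed form
    quad = quadrature_expectation(p, (kern, feat))  # same quantity via quadrature
    return analytic, quad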
avg_line_length: 34.808696 | max_line_length: 94 | alphanum_fraction: 0.691148

hexsha: 4dd1b8d9b9023b06396fda1b851e8d28b6b5d1b9 | size: 327 | ext: py | lang: Python
repo_path: Python/HRL/01-Intro to Python/01_variables.py | repo_name: Errrneist/Exia | repo_head_hexsha: d9040386089d64c67b466db99c64f786cdf625e8 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
# Hongjun Wu
# 20180208
# Python test file. This file tests variables and basic calculations.
price = 8.5
weight = 7.5
money = weight * price
# Whoa, so this is how toString is done in Python
print('You need to pay this amount: ' + money.__str__())
print('Now I will give you five bucks back.')
money -= 5
print('Now just pay: ' + money.__str__())
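# Note added for clarity: str(money) or an f-string is the more idiomatic way to
# build these messages, e.g. print(f'Now just pay: {money}').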
avg_line_length: 19.235294 | max_line_length: 71 | alphanum_fraction: 0.700306

hexsha: 1811ab4eaae14f37e35d3b7d9342db1dd0f3339b | size: 3,381 | ext: py | lang: Python
repo_path: skipp/filters/tests/tests/test_laplace.py | repo_name: IntelPython/scikit-ipp | repo_head_hexsha: df3c595296ab2009f95b1cfe412f6bf0680d4be3 | licenses: ["BSD-3-Clause"]
max_stars_count: 6 (2021-08-24T06:49:12.000Z to 2022-03-22T11:12:06.000Z) | max_issues_count: 4 (2020-11-17T14:54:25.000Z to 2021-03-18T08:58:15.000Z) | max_forks_count: 2 (2020-08-25T12:09:55.000Z to 2022-03-19T12:06:18.000Z)
# ******************************************************************************
# Copyright (c) 2020, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ******************************************************************************
import pytest
import numpy as np
from numpy.testing import assert_allclose
from skipp.filters import laplace
def test_laplace_zeros():
"""Laplace on a square image."""
# Create a synthetic 2D image
image = np.zeros((9, 9), dtype=np.float32)
image[3:-3, 3:-3] = 1
result = laplace(image)
check_result = np.array([[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., 0., 0., 0.],
[0., 0., -1., 2., 1., 2., -1., 0., 0.],
[0., 0., -1., 1., 0., 1., -1., 0., 0.],
[0., 0., -1., 2., 1., 2., -1., 0., 0.],
[0., 0., 0., -1., -1., -1., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.]])
assert_allclose(result, check_result)
# TODO
@pytest.mark.skip(reason="deveoloping _mask_filter_result")
def test_laplace_mask():
"""Laplace on a masked array should be zero."""
# Create a synthetic 2D image
image = np.zeros((9, 9), dtype=np.float32)
image[3:-3, 3:-3] = 1
# Define the mask
result = laplace(image, ksize=3, mask=np.zeros((9, 9), dtype=bool))
assert (np.all(result == 0))
@pytest.mark.parametrize(
"dtype", [np.uint8, np.uint16, np.int16, np.float32]
)
def test_laplace_preserve_dtype(dtype):
image = np.arange(25, dtype=dtype).reshape(5, 5)
filtered = laplace(image)
assert filtered.dtype == dtype
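# --- Illustrative sketch (added for clarity; not part of the original tests) ---
# Minimal direct use of skipp.filters.laplace outside of pytest, mirroring the
# synthetic image used in test_laplace_zeros above.
def _example_laplace_usage():
    image = np.zeros((9, 9), dtype=np.float32)
    image[3:-3, 3:-3] = 1
    edges = laplace(image)
    print(edges.shape, edges.dtype)   # (9, 9) float32, per test_laplace_preserve_dtype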
avg_line_length: 49 | max_line_length: 81 | alphanum_fraction: 0.578527

hexsha: 27c4b5a2b254dbb84348676dbaa9908cce764ab4 | size: 22,871 | ext: py | lang: Python
repo_path: mkt/webapps/indexers.py | repo_name: ngokevin/zamboni | repo_head_hexsha: a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69 | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
import sys
from operator import attrgetter
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db.models import Min
from elasticsearch_dsl import F, filter as es_filter, query
import commonware.log
import amo
import mkt
from mkt.constants import APP_FEATURES
from mkt.constants.applications import DEVICE_GAIA
from mkt.features.utils import get_feature_profile
from mkt.prices.models import AddonPremium
from mkt.search.indexers import BaseIndexer
from mkt.search.utils import Search
from mkt.translations.models import attach_trans_dict
from mkt.translations.utils import to_language
from mkt.versions.models import Version
log = commonware.log.getLogger('z.addons')
class WebappIndexer(BaseIndexer):
"""
    Bunch of ES stuff for Webapp, including mappings, indexing, and search.
"""
@classmethod
def search(cls, using=None):
"""
Returns a `Search` object.
We override this to use our patched version which adds statsd timing.
"""
return Search(using=using or cls.get_es(),
index=cls.get_index(),
doc_type=cls.get_mapping_type_name())
@classmethod
def get_mapping_type_name(cls):
"""
Returns mapping type name which is used as the key in ES_INDEXES to
determine which index to use.
We override this because Webapp is a proxy model to Addon.
"""
return 'webapp'
@classmethod
def get_model(cls):
from mkt.webapps.models import Webapp
return Webapp
@classmethod
def get_mapping(cls):
doc_type = cls.get_mapping_type_name()
def _locale_field_mapping(field, analyzer):
get_analyzer = lambda a: (
'%s_analyzer' % a if a in amo.STEMMER_MAP else a)
return {'%s_%s' % (field, analyzer): {
'type': 'string', 'analyzer': get_analyzer(analyzer)}}
mapping = {
doc_type: {
# Disable _all field to reduce index size.
'_all': {'enabled': False},
'properties': {
# Add a boost field to enhance relevancy of a document.
# This is used during queries in a function scoring query.
'boost': {'type': 'long', 'doc_values': True},
# App fields.
'id': {'type': 'long'},
'app_slug': {'type': 'string'},
'app_type': {'type': 'byte'},
'author': {
'type': 'string',
'analyzer': 'default_icu',
'fields': {
# For exact matches. The simple analyzer allows
# for case-insensitive matching.
'raw': {'type': 'string',
'analyzer': 'exact_lowercase'},
},
},
'banner_regions': cls.string_not_indexed(),
'bayesian_rating': {'type': 'float', 'doc_values': True},
'category': cls.string_not_analyzed(),
'collection': {
'type': 'nested',
'include_in_parent': True,
'properties': {
'id': {'type': 'long'},
'order': {'type': 'short'}
}
},
'content_descriptors': cls.string_not_indexed(),
'content_ratings': {
'type': 'object',
'dynamic': 'true',
},
'created': {'format': 'dateOptionalTime', 'type': 'date',
'doc_values': True},
'current_version': cls.string_not_indexed(),
'default_locale': cls.string_not_indexed(),
'description': {'type': 'string',
'analyzer': 'default_icu'},
'device': {'type': 'byte'},
'features': {
'type': 'object',
'properties': dict(
('has_%s' % f.lower(), {'type': 'boolean'})
for f in APP_FEATURES)
},
'has_public_stats': {'type': 'boolean'},
'icon_hash': cls.string_not_indexed(),
'interactive_elements': cls.string_not_indexed(),
'is_disabled': {'type': 'boolean'},
'is_escalated': {'type': 'boolean'},
'is_offline': {'type': 'boolean'},
'is_priority': {'type': 'boolean'},
'is_rereviewed': {'type': 'boolean'},
'last_updated': {'format': 'dateOptionalTime',
'type': 'date'},
'latest_version': {
'type': 'object',
'properties': {
'status': {'type': 'byte'},
'is_privileged': {'type': 'boolean'},
'has_editor_comment': {'type': 'boolean'},
'has_info_request': {'type': 'boolean'},
'nomination_date': {'type': 'date',
'format': 'dateOptionalTime'},
'created_date': {'type': 'date',
'format': 'dateOptionalTime'},
},
},
'manifest_url': cls.string_not_analyzed(),
'modified': {'format': 'dateOptionalTime',
'type': 'date'},
# Name for searching.
'name': {'type': 'string', 'analyzer': 'default_icu'},
# Name for sorting.
'name_sort': cls.string_not_analyzed(doc_values=True),
# Name for suggestions.
'name_suggest': {'type': 'completion', 'payloads': True},
'owners': {'type': 'long'},
'package_path': cls.string_not_indexed(),
'popularity': {'type': 'long', 'doc_values': True},
'premium_type': {'type': 'byte'},
'previews': {
'type': 'object',
'dynamic': 'true',
},
'price_tier': cls.string_not_indexed(),
'ratings': {
'type': 'object',
'properties': {
'average': {'type': 'float'},
'count': {'type': 'short'},
}
},
'region_exclusions': {'type': 'short'},
'reviewed': {'format': 'dateOptionalTime', 'type': 'date',
'doc_values': True},
'status': {'type': 'byte'},
'supported_locales': cls.string_not_analyzed(),
'tags': {'type': 'string', 'analyzer': 'simple'},
'upsell': {
'type': 'object',
'properties': {
'id': {'type': 'long'},
'app_slug': cls.string_not_indexed(),
'icon_url': cls.string_not_indexed(),
'name': cls.string_not_indexed(),
'region_exclusions': {'type': 'short'},
}
},
'uses_flash': {'type': 'boolean'},
'versions': {
'type': 'object',
'properties': {
'version': cls.string_not_indexed(),
'resource_uri': cls.string_not_indexed(),
}
},
'weekly_downloads': {'type': 'long', 'doc_values': True},
}
}
}
# Add popularity by region.
for region in mkt.regions.ALL_REGION_IDS:
mapping[doc_type]['properties'].update(
{'popularity_%s' % region: {'type': 'long'}})
# Add fields that we expect to return all translations.
cls.attach_translation_mappings(
mapping, ('banner_message', 'description', 'homepage',
'name', 'release_notes', 'support_email',
'support_url'))
# Add room for language-specific indexes.
for analyzer in amo.SEARCH_ANALYZER_MAP:
if (not settings.ES_USE_PLUGINS and
analyzer in amo.SEARCH_ANALYZER_PLUGINS):
log.info('While creating mapping, skipping the %s analyzer'
% analyzer)
continue
mapping[doc_type]['properties'].update(
_locale_field_mapping('name', analyzer))
mapping[doc_type]['properties'].update(
_locale_field_mapping('description', analyzer))
return mapping
@classmethod
def extract_document(cls, pk=None, obj=None):
"""Extracts the ElasticSearch index document for this instance."""
from mkt.webapps.models import (AppFeatures, attach_devices,
attach_prices, attach_tags,
attach_translations, Geodata,
Installed, RatingDescriptors,
RatingInteractives)
if obj is None:
obj = cls.get_model().objects.no_cache().get(pk=pk)
# Attach everything we need to index apps.
for transform in (attach_devices, attach_prices, attach_tags,
attach_translations):
transform([obj])
latest_version = obj.latest_version
version = obj.current_version
geodata = obj.geodata
features = (version.features.to_dict()
if version else AppFeatures().to_dict())
try:
status = latest_version.statuses[0][1] if latest_version else None
except IndexError:
status = None
installed_ids = list(Installed.objects.filter(addon=obj)
.values_list('id', flat=True))
attrs = ('app_slug', 'bayesian_rating', 'created', 'id', 'is_disabled',
'last_updated', 'modified', 'premium_type', 'status',
'uses_flash', 'weekly_downloads')
d = dict(zip(attrs, attrgetter(*attrs)(obj)))
d['boost'] = len(installed_ids) or 1
d['app_type'] = obj.app_type_id
d['author'] = obj.developer_name
d['banner_regions'] = geodata.banner_regions_slugs()
d['category'] = obj.categories if obj.categories else []
if obj.is_published:
d['collection'] = [{'id': cms.collection_id, 'order': cms.order}
for cms in obj.collectionmembership_set.all()]
else:
d['collection'] = []
d['content_ratings'] = (obj.get_content_ratings_by_body(es=True) or
None)
try:
d['content_descriptors'] = obj.rating_descriptors.to_keys()
except RatingDescriptors.DoesNotExist:
d['content_descriptors'] = []
d['current_version'] = version.version if version else None
d['default_locale'] = obj.default_locale
d['description'] = list(
set(string for _, string in obj.translations[obj.description_id]))
d['device'] = getattr(obj, 'device_ids', [])
d['features'] = features
d['has_public_stats'] = obj.public_stats
d['icon_hash'] = obj.icon_hash
try:
d['interactive_elements'] = obj.rating_interactives.to_keys()
except RatingInteractives.DoesNotExist:
d['interactive_elements'] = []
d['is_escalated'] = obj.escalationqueue_set.exists()
d['is_offline'] = getattr(obj, 'is_offline', False)
d['is_priority'] = obj.priority_review
d['is_rereviewed'] = obj.rereviewqueue_set.exists()
if latest_version:
d['latest_version'] = {
'status': status,
'is_privileged': latest_version.is_privileged,
'has_editor_comment': latest_version.has_editor_comment,
'has_info_request': latest_version.has_info_request,
'nomination_date': latest_version.nomination,
'created_date': latest_version.created,
}
else:
d['latest_version'] = {
'status': None,
'is_privileged': None,
'has_editor_comment': None,
'has_info_request': None,
'nomination_date': None,
'created_date': None,
}
d['manifest_url'] = obj.get_manifest_url()
d['package_path'] = obj.get_package_path()
d['name'] = list(
set(string for _, string in obj.translations[obj.name_id]))
d['name_sort'] = unicode(obj.name).lower()
d['owners'] = [au.user.id for au in
obj.addonuser_set.filter(role=amo.AUTHOR_ROLE_OWNER)]
d['popularity'] = len(installed_ids)
d['previews'] = [{'filetype': p.filetype, 'modified': p.modified,
'id': p.id, 'sizes': p.sizes}
for p in obj.previews.all()]
try:
p = obj.addonpremium.price
d['price_tier'] = p.name
except AddonPremium.DoesNotExist:
d['price_tier'] = None
d['ratings'] = {
'average': obj.average_rating,
'count': obj.total_reviews,
}
d['region_exclusions'] = obj.get_excluded_region_ids()
d['reviewed'] = obj.versions.filter(
deleted=False).aggregate(Min('reviewed')).get('reviewed__min')
if version:
d['supported_locales'] = filter(
None, version.supported_locales.split(','))
else:
d['supported_locales'] = []
d['tags'] = getattr(obj, 'tag_list', [])
if obj.upsell and obj.upsell.premium.is_published():
upsell_obj = obj.upsell.premium
d['upsell'] = {
'id': upsell_obj.id,
'app_slug': upsell_obj.app_slug,
'icon_url': upsell_obj.get_icon_url(128),
# TODO: Store all localizations of upsell.name.
'name': unicode(upsell_obj.name),
'region_exclusions': upsell_obj.get_excluded_region_ids()
}
d['versions'] = [dict(version=v.version,
resource_uri=reverse_version(v))
for v in obj.versions.all()]
# Handle our localized fields.
for field in ('description', 'homepage', 'name', 'support_email',
'support_url'):
d['%s_translations' % field] = [
{'lang': to_language(lang), 'string': string}
for lang, string
in obj.translations[getattr(obj, '%s_id' % field)]
if string]
if version:
attach_trans_dict(Version, [version])
d['release_notes_translations'] = [
{'lang': to_language(lang), 'string': string}
for lang, string
in version.translations[version.releasenotes_id]]
else:
d['release_notes_translations'] = None
attach_trans_dict(Geodata, [geodata])
d['banner_message_translations'] = [
{'lang': to_language(lang), 'string': string}
for lang, string
in geodata.translations[geodata.banner_message_id]]
for region in mkt.regions.ALL_REGION_IDS:
d['popularity_%s' % region] = d['popularity']
# Bump the boost if the add-on is public.
if obj.status == amo.STATUS_PUBLIC:
d['boost'] = max(d['boost'], 1) * 4
# If the app is compatible with Firefox OS, push suggestion data in the
# index - This will be used by RocketbarView API, which is specific to
# Firefox OS.
if DEVICE_GAIA.id in d['device'] and obj.is_published():
d['name_suggest'] = {
'input': d['name'],
'output': unicode(obj.id), # We only care about the payload.
'weight': d['boost'],
'payload': {
'default_locale': d['default_locale'],
'icon_hash': d['icon_hash'],
'id': d['id'],
'manifest_url': d['manifest_url'],
'modified': d['modified'],
'name_translations': d['name_translations'],
'slug': d['app_slug'],
}
}
# Indices for each language. languages is a list of locales we want to
# index with analyzer if the string's locale matches.
for analyzer, languages in amo.SEARCH_ANALYZER_MAP.iteritems():
if (not settings.ES_USE_PLUGINS and
analyzer in amo.SEARCH_ANALYZER_PLUGINS):
continue
d['name_' + analyzer] = list(
set(string for locale, string in obj.translations[obj.name_id]
if locale.lower() in languages))
d['description_' + analyzer] = list(
set(string for locale, string
in obj.translations[obj.description_id]
if locale.lower() in languages))
return d
@classmethod
def get_indexable(cls):
"""Returns the queryset of ids of all things to be indexed."""
from mkt.webapps.models import Webapp
return Webapp.with_deleted.all()
@classmethod
def run_indexing(cls, ids, ES, index=None, **kw):
"""Override run_indexing to use app transformers."""
from mkt.webapps.models import Webapp
sys.stdout.write('Indexing %s webapps\n' % len(ids))
qs = Webapp.with_deleted.no_cache().filter(id__in=ids)
docs = []
for obj in qs:
try:
docs.append(cls.extract_document(obj.id, obj=obj))
except Exception as e:
sys.stdout.write('Failed to index webapp {0}: {1}\n'.format(
obj.id, e))
cls.bulk_index(docs, es=ES, index=index or cls.get_index())
@classmethod
def get_app_filter(cls, request, additional_data=None, sq=None,
app_ids=None, no_filter=False):
"""
THE grand, consolidated ES filter for Webapps. By default:
- Excludes non-public apps.
- Excludes disabled apps (whether by reviewer or by developer).
- Excludes based on region exclusions.
- TODO: Excludes based on device and platform support.
additional_data -- an object with more data to allow more filtering.
sq -- if you have an existing search object to filter off of.
app_ids -- if you want to filter by a list of app IDs.
no_filter -- doesn't apply the consumer-side excludes (public/region).
"""
from mkt.api.base import get_region_from_request
from mkt.search.views import name_query
sq = sq or cls.search()
additional_data = additional_data or {}
app_ids = app_ids or []
data = {
'app_type': [],
'author.raw': None,
'category': None, # Slug.
'device': None, # ID.
'gaia': getattr(request, 'GAIA', False),
'is_offline': None,
'manifest_url': '',
'mobile': getattr(request, 'MOBILE', False),
'premium_type': [],
'profile': get_feature_profile(request),
'q': '',
'region': getattr(get_region_from_request(request), 'id', None),
'status': None,
'supported_locales': [],
'tablet': getattr(request, 'TABLET', False),
'tags': '',
}
data.update(additional_data)
# Fields that will be filtered with a term query.
term_fields = ('author.raw', 'device', 'manifest_url', 'status',
'tags')
# Fields that will be filtered with a terms query.
terms_fields = ('category', 'premium_type', 'app_type',
'supported_locales')
# QUERY.
if data['q']:
# Function score for popularity boosting (defaults to multiply).
sq = sq.query(
'function_score',
query=name_query(data['q'].lower()),
functions=[query.SF('field_value_factor', field='boost')])
# MUST.
must = [
F('term', status=amo.STATUS_PUBLIC),
F('term', is_disabled=False),
] if not no_filter else []
for field in term_fields + terms_fields:
# Term filters.
if data[field]:
filter_type = 'term' if field in term_fields else 'terms'
must.append(F(filter_type, **{field: data[field]}))
if not no_filter:
if data['profile']:
# Feature filters.
profile = data['profile']
for k, v in profile.to_kwargs(prefix='features.has_').items():
must.append(F('term', **{k: v}))
if data['mobile'] or data['gaia']:
# Uses flash.
must.append(F('term', uses_flash=False))
if data['is_offline'] is not None:
must.append(F('term', is_offline=data['is_offline']))
# SHOULD.
should = []
if app_ids:
should = [es_filter.Terms(id=list(set(app_ids)))]
sq = sq[0:len(set(app_ids))]
# FILTER.
if must or should:
sq = sq.filter(es_filter.Bool(must=must, should=should))
if data['region'] and not no_filter:
# Region exclusions.
sq = sq.filter(~F('term', region_exclusions=data['region']))
return sq
def reverse_version(version):
"""
The try/except AttributeError allows this to be used where the input is
ambiguous, and could be either an already-reversed URL or a Version object.
"""
if version:
try:
return reverse('version-detail', kwargs={'pk': version.pk})
except AttributeError:
return version
return
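# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Typical consumer-side use of the consolidated filter, assuming a real Django
# request object (region, feature profile and device flags are read from it).
# The query string and the .execute() call are illustrative elasticsearch-dsl
# usage, not code from this repository.
def _example_app_search(request):
    sq = WebappIndexer.get_app_filter(
        request,
        additional_data={'q': 'podcast', 'device': DEVICE_GAIA.id})
    return sq.execute()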
avg_line_length: 41.432971 | max_line_length: 79 | alphanum_fraction: 0.50505

hexsha: 8d150c6bcf0e26f6f91eec2f63a144fb86ae6759 | size: 2,503 | ext: py | lang: Python
repo_path: includes/cisco.py | repo_name: b1scuit-thi3f/jimiPlugin-remote | repo_head_hexsha: 915621e325df6424832a9052d47af3ee0f6b095a | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
import time
import re
import logging
from netmiko import ConnectHandler
from plugins.remote.includes import remote
class cisco(remote.remote):
def __init__(self, host, deviceHostname, username="Admin", password='', enablePassword="", port=22, timeout=5):
self.host = host
self.deviceHostname = deviceHostname
self.timeout = timeout
self.enablePassword = enablePassword
self.error = ""
self.type = "cisco_ios"
self.client = self.connect(username,password,port)
def connect(self,username,password,port):
try:
client = ConnectHandler(host=self.host, device_type=self.type, username=username, password=password, secret=self.enablePassword, port=port, system_host_keys=True, timeout=self.timeout)
detectedDevice = client.find_prompt().strip()[:-1]
if detectedDevice != self.deviceHostname:
self.error = f"Device detected name does not match the device name provided. Hostname found = {detectedDevice}"
client.disconnect()
return None
return client
except Exception as e:
self.error = e
return None
def disconnect(self):
if self.client:
self.client.disconnect()
self.client = None
def sendCommand(self,command,attempt=0):
if attempt > 3:
return False
output = self.client.send_command(command)
if output:
return output
time.sleep(0.1)
logging.warning("Command was not received by remote console. command={0}, attempt={1}".format(command),attempt)
return self.sendCommand(command,attempt+1)
def command(self, command, args=[], elevate=False, runAs=None, timeout=5):
if command == "enable":
try:
self.client.enable()
return (0, "test")
except ValueError:
return (None,"","Could not enable")
if args:
command = command + " " + " ".join(args)
returnedData = self.sendCommand(command)
if returnedData is False:
return (None,"","Unable to send command")
if returnedData == False or "% Invalid input detected at '^'" in returnedData or "% Incomplete command." in returnedData or "Command rejected" in returnedData:
return (None,"",returnedData)
return (0, returnedData, "")
def __del__(self):
self.disconnect()
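# --- Illustrative sketch (added for clarity; not part of the original plugin) ---
# Minimal connect/run/disconnect cycle. The host, hostname and credentials below
# are placeholders; the device prompt must match deviceHostname or connect()
# returns None and sets .error.
def _example_show_version():
    device = cisco('192.0.2.1', 'router1', username='Admin',
                   password='secret', enablePassword='secret')
    if device.client is None:
        print('Connection failed: %s' % device.error)
        return
    rc, output, error = device.command('show version')
    print(output if rc == 0 else error)
    device.disconnect()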
avg_line_length: 39.109375 | max_line_length: 196 | alphanum_fraction: 0.613264

hexsha: 07b238fba3c3d48e0514e0643187d25b907bc648 | size: 4,360 | ext: py | lang: Python
repo_path: app/utils.py | repo_name: kindweb223/React-Django-M3U8 | repo_head_hexsha: ee3edfabcf4e4423e96591df7cf42f8d89813292 | licenses: ["Apache-2.0"]
max_stars_count: 1 (2020-12-28T07:54:05.000Z to 2020-12-28T07:54:05.000Z) | max_issues_count: 11 (2020-06-05T17:35:01.000Z to 2022-02-26T07:00:16.000Z) | max_forks_count: 1 (2021-11-02T11:35:45.000Z to 2021-11-02T11:35:45.000Z)
import json
import logging
import re
import requests
from django.conf import settings
logger = logging.getLogger(__name__)
class M3U8ChannelFactory(object):
group = None
duration = None
title = None
path = None
extra_data = {}
def init_channel(self, extinf_string):
logger.info('Init channel: %s', extinf_string)
self.duration = None
self.title = None
self.group = None
self.path = None
self.extra_data = dict()
try:
duration = re.findall(r'EXTINF:(-?\d+)', extinf_string)[0]
title = extinf_string.split(',')[-1]
self.title = title
self.duration = duration
except IndexError as e:
logging.warning('Unable to parse EXTINF string: {}. Error: {} '.format(extinf_string, e))
return
# Collect extra attrs
extra_attrs = [
'tvg-ID',
'tvg-name',
'tvg-logo',
'group-title'
]
for attr in extra_attrs:
            attr_values = re.findall(r'(?i)^.*{attr}="([^"]*)".*'.format(attr=attr), extinf_string)
if attr_values:
self.extra_data[attr] = attr_values[0]
def process_line(self, line):
""" Add line to the channel """
logger.info('Processing line: %s', line)
if not line:
return
if isinstance(line, bytes):
line = line.decode("utf-8", errors="ignore")
if line == '#EXTM3U':
return
if line.startswith('#EXTINF:'):
self.init_channel(line)
return
if line.startswith('#EXTGRP:'):
self.group = line[8:]
return
if line.startswith('#'):
logger.warning('Unsupported line skipped: {}'.format(line))
return
# Line without hash is the last in the channel definition and it is the path
logger.info('Adding path: %s', line)
self.path = line
def get_extra_data(self):
if self.extra_data:
return json.dumps(self.extra_data)
else:
return None
def is_complete(self):
logger.info('Checking if channel complete')
return all([self.path, self.title, self.group, self.duration])
def load_remote_m3u8(link, playlist, remove_existed=False):
from app.models import Channel
r = requests.get(link)
if not r.ok:
return False
if remove_existed:
Channel.objects.filter(playlists=playlist).delete()
channel_factory = M3U8ChannelFactory()
channel_count = 0
for line in r.iter_lines(decode_unicode=True):
channel_factory.process_line(line)
if channel_factory.is_complete():
channel_count += 1
if channel_count > settings.MAX_CHANNELS:
logger.warning('Too many channels, only %s was loaded', settings.MAX_CHANNELS)
return False
channel_obj = Channel.objects.create(
user=playlist.user,
title=channel_factory.title,
duration=channel_factory.duration,
group=channel_factory.group,
extra_data=channel_factory.get_extra_data(),
path=channel_factory.path
)
channel_obj.playlists.add(playlist)
return True
def load_m3u8_from_file(fo, playlist, remove_existed=False):
from app.models import Channel
if remove_existed:
Channel.objects.filter(playlists=playlist).delete()
channel_factory = M3U8ChannelFactory()
channel_count = 0
for line in fo.read().splitlines():
channel_factory.process_line(line)
if channel_factory.is_complete():
channel_count += 1
if channel_count > settings.MAX_CHANNELS:
logger.warning('Too many channels, only %s was loaded', settings.MAX_CHANNELS)
return False
channel_obj = Channel.objects.create(
user=playlist.user,
title=channel_factory.title,
duration=channel_factory.duration,
group=channel_factory.group,
extra_data=channel_factory.get_extra_data(),
path=channel_factory.path
)
channel_obj.playlists.add(playlist)
return True
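# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Shows how the factory consumes raw playlist lines on its own, without a
# Playlist or Channel model. The URLs and titles are invented.
def _example_parse_lines():
    factory = M3U8ChannelFactory()
    for line in ('#EXTM3U',
                 '#EXTINF:-1 tvg-logo="http://example.com/logo.png",News Channel',
                 '#EXTGRP:News',
                 'http://example.com/stream.m3u8'):
        factory.process_line(line)
    if factory.is_complete():
        print(factory.title, factory.group, factory.duration, factory.get_extra_data())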
avg_line_length: 28.874172 | max_line_length: 101 | alphanum_fraction: 0.586468

hexsha: d0d0e5620db0317ecc637195eadc729744f5e9ed | size: 5,405 | ext: py | lang: Python
repo_path: warehouse/admin/views/banners.py | repo_name: fairhopeweb/warehouse | repo_head_hexsha: 7d8ef742e8fe6b401190c28ce56761848041c89f | licenses: ["Apache-2.0"]
max_stars_count: 3,103 (2015-01-30T00:24:10.000Z to 2022-03-31T23:21:39.000Z) | max_issues_count: 6,709 (2015-01-05T01:23:20.000Z to 2022-03-31T14:49:46.000Z) | max_forks_count: 959 (2015-01-12T22:22:40.000Z to 2022-03-31T22:21:51.000Z)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wtforms
from pyramid.httpexceptions import HTTPNotFound, HTTPSeeOther
from pyramid.view import view_config
from sqlalchemy.orm.exc import NoResultFound
from warehouse.banners.models import Banner
from warehouse.forms import Form, URIValidator
@view_config(
route_name="admin.banner.list",
renderer="admin/banners/list.html",
permission="admin_dashboard_access",
request_method="GET",
uses_session=True,
)
def banner_list(request):
banners = request.db.query(Banner).all()
return {"banners": banners}
@view_config(
route_name="admin.banner.edit",
renderer="admin/banners/edit.html",
permission="admin_dashboard_access",
request_method="GET",
uses_session=True,
require_csrf=True,
require_methods=False,
)
@view_config(
route_name="admin.banner.edit",
renderer="admin/banners/edit.html",
permission="psf_staff",
request_method="POST",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def edit_banner(request):
id_ = request.matchdict["banner_id"]
try:
banner = request.db.query(Banner).filter(Banner.id == id_).one()
except NoResultFound:
raise HTTPNotFound
form = BannerForm(request.POST if request.method == "POST" else None, banner)
if request.method == "POST" and form.validate():
form.populate_obj(banner)
request.session.flash("Banner updated", queue="success")
return HTTPSeeOther(location=request.current_route_path())
return {"banner": banner, "form": form}
@view_config(
route_name="admin.banner.create",
renderer="admin/banners/edit.html",
permission="admin_dashboard_access",
request_method="GET",
uses_session=True,
require_csrf=True,
require_methods=False,
)
@view_config(
route_name="admin.banner.create",
renderer="admin/banners/edit.html",
permission="psf_staff",
request_method="POST",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def create_banner(request):
form = BannerForm(request.POST if request.method == "POST" else None)
if request.method == "POST" and form.validate():
banner = Banner(**form.data)
request.db.add(banner)
request.session.flash(
f"Added new banner '{banner.name}'",
queue="success",
)
redirect_url = request.route_url("admin.banner.list")
return HTTPSeeOther(location=redirect_url)
return {"form": form}
@view_config(
route_name="admin.banner.delete",
require_methods=["POST"],
permission="psf_staff",
uses_session=True,
require_csrf=True,
)
def delete_banner(request):
id_ = request.matchdict["banner_id"]
try:
banner = request.db.query(Banner).filter(Banner.id == id_).one()
except NoResultFound:
raise HTTPNotFound
# Safeguard check on banner name
if banner.name != request.params.get("banner"):
request.session.flash("Wrong confirmation input", queue="error")
return HTTPSeeOther(request.route_url("admin.banner.edit", banner_id=banner.id))
# Delete the banner
request.db.delete(banner)
request.session.flash(f"Deleted banner {banner.name}", queue="success")
return HTTPSeeOther(request.route_url("admin.banner.list"))
@view_config(
route_name="admin.banner.preview",
require_methods=["GET"],
permission="moderator",
uses_session=True,
require_csrf=True,
has_translations=True,
renderer="admin/banners/preview.html",
)
def preview_banner(request):
id_ = request.matchdict["banner_id"]
try:
banner = request.db.query(Banner).filter(Banner.id == id_).one()
return {"banner": banner}
except NoResultFound:
raise HTTPNotFound
class BannerForm(Form):
name = wtforms.fields.StringField(
validators=[
wtforms.validators.Length(max=100),
wtforms.validators.DataRequired(),
],
)
text = wtforms.fields.StringField(
validators=[
wtforms.validators.Length(max=280),
wtforms.validators.DataRequired(),
],
)
link_url = wtforms.fields.StringField(
validators=[
wtforms.validators.DataRequired(),
URIValidator(),
]
)
link_label = wtforms.fields.StringField(
validators=[
wtforms.validators.Optional(),
],
default=Banner.DEFAULT_BTN_LABEL,
)
fa_icon = wtforms.fields.StringField(
validators=[
wtforms.validators.Length(max=20),
wtforms.validators.Optional(),
],
default=Banner.DEFAULT_FA_ICON,
)
active = wtforms.fields.BooleanField(
validators=[wtforms.validators.Optional()], default=False
)
end = wtforms.fields.DateField(validators=[wtforms.validators.DataRequired()])
| avg_line_length: 29.535519 | max_line_length: 88 | alphanum_fraction: 0.679371 |
| hexsha: 451f5c4d89346835c58d37cd42eadd492e17d300 | size: 893 | ext: py | lang: Python |
| max_stars: repo_path=src/main.py, repo_name=andresmesad09/challenge-python-01, repo_head_hexsha=188e26f938eddebc16d68d6730ad79f2c9f93a7e, repo_licenses=["MIT"], count=null, stars_event_min_datetime=null, stars_event_max_datetime=null |
| max_issues: repo_path=src/main.py, repo_name=andresmesad09/challenge-python-01, repo_head_hexsha=188e26f938eddebc16d68d6730ad79f2c9f93a7e, repo_licenses=["MIT"], count=null, issues_event_min_datetime=null, issues_event_max_datetime=null |
| max_forks: repo_path=src/main.py, repo_name=andresmesad09/challenge-python-01, repo_head_hexsha=188e26f938eddebc16d68d6730ad79f2c9f93a7e, repo_licenses=["MIT"], count=1, forks_event_min_datetime=2020-07-21T04:02:47.000Z, forks_event_max_datetime=2020-07-21T04:02:47.000Z |
# Resolve the problem!!
PALINDROMES = [
'Acaso hubo buhos aca',
'A la catalana banal atacala',
'Amar da drama',
]
NOT_PALINDROMES = [
'Hola como estas',
    'Platzi',
    'Oscar',
]
def is_palindrome(palindrome):
# Start coding here
word_lower = palindrome.lower()
word_strip = word_lower.replace(' ', '')
word_inverse = word_strip[::-1]
if word_inverse == palindrome.replace(' ', '').lower():
return True
else:
return False
def validate():
for palindrome in PALINDROMES:
if not is_palindrome(palindrome):
return False
for not_palindrome in NOT_PALINDROMES:
if is_palindrome(not_palindrome):
return False
return True
def run():
if validate():
print('Completaste el test')
else:
print('No completaste el test')
if __name__ == '__main__':
run()
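# --- Illustrative usage (not part of the original challenge file) ---
# A minimal sketch of the normalization is_palindrome() performs, guarded so it
# only runs when this script is executed directly; the sample phrase is taken
# from the PALINDROMES list above.
if __name__ == '__main__':
    sample = 'Amar da drama'
    # Lowercased and stripped of spaces this becomes 'amardadrama', which reads
    # the same backwards, so the call prints True.
    print(sample, '->', is_palindrome(sample))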
| avg_line_length: 19 | max_line_length: 59 | alphanum_fraction: 0.606943 |
| hexsha: f9ecdf663a769032384e347af9237ced758594dd | size: 10,600 | ext: py | lang: Python |
| max_stars: repo_path=openstack/tests/functional/cloud/test_identity.py, repo_name=NeCTAR-RC/openstacksdk, repo_head_hexsha=60a24f6c4717a1f9a0e545c9a07e68afaedc5a27, repo_licenses=["Apache-2.0"], count=99, stars_event_min_datetime=2018-03-28T15:41:45.000Z, stars_event_max_datetime=2022-01-23T17:22:13.000Z |
| max_issues: repo_path=openstack/tests/functional/cloud/test_identity.py, repo_name=NeCTAR-RC/openstacksdk, repo_head_hexsha=60a24f6c4717a1f9a0e545c9a07e68afaedc5a27, repo_licenses=["Apache-2.0"], count=5, issues_event_min_datetime=2018-05-25T16:54:23.000Z, issues_event_max_datetime=2021-11-21T02:27:16.000Z |
| max_forks: repo_path=openstack/tests/functional/cloud/test_identity.py, repo_name=NeCTAR-RC/openstacksdk, repo_head_hexsha=60a24f6c4717a1f9a0e545c9a07e68afaedc5a27, repo_licenses=["Apache-2.0"], count=104, forks_event_min_datetime=2018-04-06T14:33:54.000Z, forks_event_max_datetime=2022-03-01T01:58:09.000Z |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_identity
----------------------------------
Functional tests for `shade` identity methods.
"""
import random
import string
from openstack.cloud.exc import OpenStackCloudException
from openstack.tests.functional import base
class TestIdentity(base.KeystoneBaseFunctionalTest):
def setUp(self):
super(TestIdentity, self).setUp()
self.role_prefix = 'test_role' + ''.join(
random.choice(string.ascii_lowercase) for _ in range(5))
self.user_prefix = self.getUniqueString('user')
self.group_prefix = self.getUniqueString('group')
self.addCleanup(self._cleanup_users)
if self.identity_version not in ('2', '2.0'):
self.addCleanup(self._cleanup_groups)
self.addCleanup(self._cleanup_roles)
def _cleanup_groups(self):
exception_list = list()
for group in self.operator_cloud.list_groups():
if group['name'].startswith(self.group_prefix):
try:
self.operator_cloud.delete_group(group['id'])
except Exception as e:
exception_list.append(str(e))
continue
if exception_list:
# Raise an error: we must make users aware that something went
# wrong
raise OpenStackCloudException('\n'.join(exception_list))
def _cleanup_users(self):
exception_list = list()
for user in self.operator_cloud.list_users():
if user['name'].startswith(self.user_prefix):
try:
self.operator_cloud.delete_user(user['id'])
except Exception as e:
exception_list.append(str(e))
continue
if exception_list:
raise OpenStackCloudException('\n'.join(exception_list))
def _cleanup_roles(self):
exception_list = list()
for role in self.operator_cloud.list_roles():
if role['name'].startswith(self.role_prefix):
try:
self.operator_cloud.delete_role(role['name'])
except Exception as e:
exception_list.append(str(e))
continue
if exception_list:
raise OpenStackCloudException('\n'.join(exception_list))
def _create_user(self, **kwargs):
domain_id = None
if self.identity_version not in ('2', '2.0'):
domain = self.operator_cloud.get_domain('default')
domain_id = domain['id']
return self.operator_cloud.create_user(domain_id=domain_id, **kwargs)
def test_list_roles(self):
roles = self.operator_cloud.list_roles()
self.assertIsNotNone(roles)
self.assertNotEqual([], roles)
def test_get_role(self):
role = self.operator_cloud.get_role('admin')
self.assertIsNotNone(role)
self.assertIn('id', role)
self.assertIn('name', role)
self.assertEqual('admin', role['name'])
def test_search_roles(self):
roles = self.operator_cloud.search_roles(filters={'name': 'admin'})
self.assertIsNotNone(roles)
self.assertEqual(1, len(roles))
self.assertEqual('admin', roles[0]['name'])
def test_create_role(self):
role_name = self.role_prefix + '_create_role'
role = self.operator_cloud.create_role(role_name)
self.assertIsNotNone(role)
self.assertIn('id', role)
self.assertIn('name', role)
self.assertEqual(role_name, role['name'])
def test_delete_role(self):
role_name = self.role_prefix + '_delete_role'
role = self.operator_cloud.create_role(role_name)
self.assertIsNotNone(role)
self.assertTrue(self.operator_cloud.delete_role(role_name))
# TODO(Shrews): Once we can support assigning roles within shade, we
# need to make this test a little more specific, and add more for testing
# filtering functionality.
def test_list_role_assignments(self):
if self.identity_version in ('2', '2.0'):
self.skipTest("Identity service does not support role assignments")
assignments = self.operator_cloud.list_role_assignments()
self.assertIsInstance(assignments, list)
self.assertGreater(len(assignments), 0)
def test_list_role_assignments_v2(self):
user = self.operator_cloud.get_user('demo')
project = self.operator_cloud.get_project('demo')
assignments = self.operator_cloud.list_role_assignments(
filters={'user': user['id'], 'project': project['id']})
self.assertIsInstance(assignments, list)
self.assertGreater(len(assignments), 0)
def test_grant_revoke_role_user_project(self):
user_name = self.user_prefix + '_user_project'
user_email = 'nobody@nowhere.com'
role_name = self.role_prefix + '_grant_user_project'
role = self.operator_cloud.create_role(role_name)
user = self._create_user(name=user_name,
email=user_email,
default_project='demo')
self.assertTrue(self.operator_cloud.grant_role(
role_name, user=user['id'], project='demo', wait=True))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'user': user['id'],
'project': self.operator_cloud.get_project('demo')['id']
})
self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role(
role_name, user=user['id'], project='demo', wait=True))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'user': user['id'],
'project': self.operator_cloud.get_project('demo')['id']
})
self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments))
def test_grant_revoke_role_group_project(self):
if self.identity_version in ('2', '2.0'):
self.skipTest("Identity service does not support group")
role_name = self.role_prefix + '_grant_group_project'
role = self.operator_cloud.create_role(role_name)
group_name = self.group_prefix + '_group_project'
group = self.operator_cloud.create_group(
name=group_name,
description='test group',
domain='default')
self.assertTrue(self.operator_cloud.grant_role(
role_name, group=group['id'], project='demo'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'group': group['id'],
'project': self.operator_cloud.get_project('demo')['id']
})
self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role(
role_name, group=group['id'], project='demo'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'group': group['id'],
'project': self.operator_cloud.get_project('demo')['id']
})
self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments))
def test_grant_revoke_role_user_domain(self):
if self.identity_version in ('2', '2.0'):
self.skipTest("Identity service does not support domain")
role_name = self.role_prefix + '_grant_user_domain'
role = self.operator_cloud.create_role(role_name)
user_name = self.user_prefix + '_user_domain'
user_email = 'nobody@nowhere.com'
user = self._create_user(name=user_name,
email=user_email,
default_project='demo')
self.assertTrue(self.operator_cloud.grant_role(
role_name, user=user['id'], domain='default'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'user': user['id'],
'domain': self.operator_cloud.get_domain('default')['id']
})
self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role(
role_name, user=user['id'], domain='default'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'user': user['id'],
'domain': self.operator_cloud.get_domain('default')['id']
})
self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments))
def test_grant_revoke_role_group_domain(self):
if self.identity_version in ('2', '2.0'):
self.skipTest("Identity service does not support domain or group")
role_name = self.role_prefix + '_grant_group_domain'
role = self.operator_cloud.create_role(role_name)
group_name = self.group_prefix + '_group_domain'
group = self.operator_cloud.create_group(
name=group_name,
description='test group',
domain='default')
self.assertTrue(self.operator_cloud.grant_role(
role_name, group=group['id'], domain='default'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'group': group['id'],
'domain': self.operator_cloud.get_domain('default')['id']
})
self.assertIsInstance(assignments, list)
self.assertEqual(1, len(assignments))
self.assertTrue(self.operator_cloud.revoke_role(
role_name, group=group['id'], domain='default'))
assignments = self.operator_cloud.list_role_assignments({
'role': role['id'],
'group': group['id'],
'domain': self.operator_cloud.get_domain('default')['id']
})
self.assertIsInstance(assignments, list)
self.assertEqual(0, len(assignments))
| avg_line_length: 42.231076 | max_line_length: 79 | alphanum_fraction: 0.62717 |
| hexsha: baf73fab3c2dd1c31c1286ebc8b991979a6b0535 | size: 382 | ext: py | lang: Python |
| max_stars: repo_path=src/Cheese/testError.py, repo_name=KubaBoi/CheeseFramework, repo_head_hexsha=ffe35449f43efa957bd5b30b28f61d3bf165699c, repo_licenses=["MIT"], count=2, stars_event_min_datetime=2022-03-10T15:37:12.000Z, stars_event_max_datetime=2022-03-12T01:17:05.000Z |
| max_issues: repo_path=src/Cheese/testError.py, repo_name=KubaBoi/CheeseFramework, repo_head_hexsha=ffe35449f43efa957bd5b30b28f61d3bf165699c, repo_licenses=["MIT"], count=null, issues_event_min_datetime=null, issues_event_max_datetime=null |
| max_forks: repo_path=src/Cheese/testError.py, repo_name=KubaBoi/CheeseFramework, repo_head_hexsha=ffe35449f43efa957bd5b30b28f61d3bf165699c, repo_licenses=["MIT"], count=null, forks_event_min_datetime=null, forks_event_max_datetime=null |
#cheese
class TestError(Exception):
def __init__(self, value, template, comment):
self.value = value
self.template = template
self.comment = comment
class MockError(Exception):
def __init__(self, repositoryName, methodName, argName):
self.repositoryName = repositoryName
self.methodName = methodName
self.argName = argName
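# Illustrative usage only (not part of the original framework file): shows how
# the exception types above carry their context when raised; the repository,
# method and argument names here are made up for the demo.
if __name__ == "__main__":
    try:
        raise MockError("UserRepository", "findById", "userId")
    except MockError as error:
        print(error.repositoryName, error.methodName, error.argName)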
| avg_line_length: 25.466667 | max_line_length: 60 | alphanum_fraction: 0.67801 |
| hexsha: 538ab6da279f3078b867649572d493ea6e4cd4a0 | size: 1,434 | ext: py | lang: Python |
| max_stars: repo_path=bikesharing/migrations/0047_vehicle_type_reservation_20210906_1403.py, repo_name=iteratec/cykel, repo_head_hexsha=10e238395b4ca2ecc9c18272305af6c27da6c640, repo_licenses=["MIT"], count=null, stars_event_min_datetime=null, stars_event_max_datetime=null |
| max_issues: repo_path=bikesharing/migrations/0047_vehicle_type_reservation_20210906_1403.py, repo_name=iteratec/cykel, repo_head_hexsha=10e238395b4ca2ecc9c18272305af6c27da6c640, repo_licenses=["MIT"], count=null, issues_event_min_datetime=null, issues_event_max_datetime=null |
| max_forks: repo_path=bikesharing/migrations/0047_vehicle_type_reservation_20210906_1403.py, repo_name=iteratec/cykel, repo_head_hexsha=10e238395b4ca2ecc9c18272305af6c27da6c640, repo_licenses=["MIT"], count=1, forks_event_min_datetime=2020-11-30T09:50:19.000Z, forks_event_max_datetime=2020-11-30T09:50:19.000Z |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("bikesharing", "0046_rent_remove_position_20201204_2059"),
]
operations = [
migrations.AddField(
model_name="vehicletype",
name="allow_spontaneous_rent",
field=models.BooleanField(
default=True,
),
),
migrations.AddField(
model_name="vehicletype",
name="allow_reservation",
field=models.BooleanField(
default=False,
),
),
migrations.AddField(
model_name="vehicletype",
name="min_spontaneous_rent_vehicles",
field=models.IntegerField(
default=0,
),
),
migrations.AddField(
model_name="vehicletype",
name="min_reservation_vehicles",
field=models.IntegerField(
default=0,
),
),
migrations.AddField(
model_name="vehicletype",
name="reservation_lead_time_minutes",
field=models.IntegerField(
default=120,
),
),
migrations.AddField(
model_name="vehicletype",
name="max_reservation_days",
field=models.IntegerField(
default=7,
),
),
]
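# Note (not part of the original migration): every AddField above supplies a
# default, so existing VehicleType rows are backfilled without prompting. The
# migration is applied with Django's standard `manage.py migrate` command and
# auto-reverses by dropping the same six fields.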
| avg_line_length: 26.555556 | max_line_length: 67 | alphanum_fraction: 0.511158 |
| hexsha: 78c021a9af559006ce42e991dc55f3b0e177fa9d | size: 3,064 | ext: py | lang: Python |
| max_stars: repo_path=pcraster/pcraster-4.2.0/pcraster-4.2.0/source/pcraster_block_python/usecase.py, repo_name=quanpands/wflow, repo_head_hexsha=b454a55e4a63556eaac3fbabd97f8a0b80901e5a, repo_licenses=["MIT"], count=null, stars_event_min_datetime=null, stars_event_max_datetime=null |
| max_issues: repo_path=pcraster/pcraster-4.2.0/pcraster-4.2.0/source/pcraster_block_python/usecase.py, repo_name=quanpands/wflow, repo_head_hexsha=b454a55e4a63556eaac3fbabd97f8a0b80901e5a, repo_licenses=["MIT"], count=null, issues_event_min_datetime=null, issues_event_max_datetime=null |
| max_forks: repo_path=pcraster/pcraster-4.2.0/pcraster-4.2.0/source/pcraster_block_python/usecase.py, repo_name=quanpands/wflow, repo_head_hexsha=b454a55e4a63556eaac3fbabd97f8a0b80901e5a, repo_licenses=["MIT"], count=null, forks_event_min_datetime=null, forks_event_max_datetime=null |
#!/usr/bin/env python2.4
from pcraster import *
from PCRasterBlock import *
nrRows = 3
nrCols = 2
cellSize = 1
west = 0.0
north = 0.0
# Use pcrasterpy for all rasters.
raster = Raster(nrRows, nrCols, cellSize, west, north)
baseElevation = REAL4RasterData(raster, 0.0)
block = createBlock(baseElevation)
originalThickness = REAL4BlockData(block, 0.0) # Not used.
thickness = REAL4RasterData(raster, 1)
maxVoxelThickness = 0.1
compactors = Compactors()
age = REAL4BlockData(block, 12.5)
sediment = INT4BlockData(block, 3)
# sediment = INT4BlockData(block, "aboolean.pcr")
# ? data type sediment??
for i in range(block.nrCells()):
print(i)
sediment.setDefaultValue(i)
compactors.setCompactor(i, DummyCompactor())
setMVREAL4(thickness, i)
# TODO: block can be removed.
mackeyBridgeAdd(block, originalThickness, sediment, thickness,
maxVoxelThickness, compactors)
# thickness="dep.map"
# sedimentation="sed.map"
# add(block, thickness, maxVoxelThickness, compactors,
# sediment = sedimentation, age="age.map")
# add("sed.map", "age.map", thickness, maxVoxelThickness, compactors)
# add("age.map", "sed.map", thickness, maxVoxelThickness, compactors)
#
#sediment.default("sed.map")
#age.default("age.map")
# add(block, thickness, maxVoxelThickness, compactors)
# sand = sediment == 3
# Ksat=if(sand,12.4,4.0)
# Q=modflow(block)
regularBlock = resampleBlock(block, 0.2)
# writeREAL4BlockDataVTK(
# resampleREAL4BlockData(sediment, regularBlock), "sediment.xml")
writeINT4BlockDataVTK(
resampleINT4BlockData(sediment, regularBlock), "sediment.xml")
# lowestElevation = mapminimum(baseElevation(block))
lowestElevation = 0.0
# highestElevation = mapminimum(surfaceElevation(block))
# heightDifference = highestElevation - lowestElevation
heightDifference = 100
interval = 50
nrLayers = int(heightDifference / interval)
height = lowestElevation
for i in range(nrLayers):
profile = profileINT4(sediment, height)
height += interval
# report(height, "height%d" % (i))
# Check discretisation.
# for i in range(block.nrCells()):
# assert len(block.cell(i)) == 16
# assert block.cell(i).thickness() == 15.5
# assert block.cell(i)[len(block.cell(i)) - 1] == 0.5
# Check data.
# TODO copy from C++ tests
# elevation = surfaceElevation(block)
# writeBlock(block, "area1.pcrblock")
# writeINT4BlockData(sediment, "sediment.pcrblock")
# sediment2 = readINT4BlockData("sediment.pcrblock", block)
# assert sediment == sediment2
# d_block=block.block() -> skip, changed interface
# d_block.configure(1,1) -> skip, set from the outside, during save/read
# d_block.setBase(self.ElevationIni) -> skip, constructor argument
# d_block.add(ifthen(d_clone,scalar(50)),
# ifthen(d_clone,scalar(10.0)), \
# ifthen(d_clone,nominal(0))) -> skip, changed interface
# Elevation=d_block.surface()
# d_block.remove(Erosion) -> skip, changed interface
# d_sampleData.d_block.resample(0.4) -> changed interface
# d_sampleData.d_block.saveAsBinary() -> use profile which returns a raster
print("done")
| avg_line_length: 30.039216 | max_line_length: 75 | alphanum_fraction: 0.725849 |
| hexsha: 300989971702e21924f3f86b1494a6a6796bbc98 | size: 790 | ext: py | lang: Python |
| max_stars: repo_path=bot/tests/conftest.py, repo_name=sh4rpy/volodya, repo_head_hexsha=5165584281ddfeabe5b1e637690a6adade291524, repo_licenses=["BSD-3-Clause"], count=null, stars_event_min_datetime=null, stars_event_max_datetime=null |
| max_issues: repo_path=bot/tests/conftest.py, repo_name=sh4rpy/volodya, repo_head_hexsha=5165584281ddfeabe5b1e637690a6adade291524, repo_licenses=["BSD-3-Clause"], count=null, issues_event_min_datetime=null, issues_event_max_datetime=null |
| max_forks: repo_path=bot/tests/conftest.py, repo_name=sh4rpy/volodya, repo_head_hexsha=5165584281ddfeabe5b1e637690a6adade291524, repo_licenses=["BSD-3-Clause"], count=null, forks_event_min_datetime=null, forks_event_max_datetime=null |
import os
import pytest
from django.conf import settings
from dotenv import load_dotenv
from users.models import TelegramUser
load_dotenv()
@pytest.fixture(scope='session')
def django_db_setup():
settings.DATABASES['default'] = {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.getenv('DB_NAME', 'postgres'),
'USER': os.getenv('DB_USER', 'postgres'),
'HOST': os.getenv('DB_HOST', 'db'),
'PORT': os.getenv('DB_PORT', 5432),
'PASSWORD': os.getenv('DB_PASSWORD', 'postgres'),
}
@pytest.fixture
def get_telegram_admin_user_id():
return TelegramUser.objects.filter(is_admin=True).first().telegram_id
@pytest.fixture
def get_telegram_user_id():
return TelegramUser.objects.filter(is_admin=False).first().telegram_id
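# Illustrative note (not part of the original conftest): a test module in this
# suite could consume the fixtures above roughly like this (the test name is
# made up for the example):
#
#     @pytest.mark.django_db
#     def test_admin_user_exists(get_telegram_admin_user_id):
#         assert get_telegram_admin_user_id is not None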
| avg_line_length: 23.939394 | max_line_length: 74 | alphanum_fraction: 0.693671 |
| hexsha: 6cea097d6f247ff42da78bf9b6e670027776a98d | size: 2,482 | ext: py | lang: Python |
| max_stars: repo_path=contrib/pyi_runtimehook.py, repo_name=stashpayio/electrum-stash, repo_head_hexsha=a04e1fde408196e547cf80f8ce9d9391133bd865, repo_licenses=["MIT"], count=null, stars_event_min_datetime=null, stars_event_max_datetime=null |
| max_issues: repo_path=contrib/pyi_runtimehook.py, repo_name=stashpayio/electrum-stash, repo_head_hexsha=a04e1fde408196e547cf80f8ce9d9391133bd865, repo_licenses=["MIT"], count=null, issues_event_min_datetime=null, issues_event_max_datetime=null |
| max_forks: repo_path=contrib/pyi_runtimehook.py, repo_name=stashpayio/electrum-stash, repo_head_hexsha=a04e1fde408196e547cf80f8ce9d9391133bd865, repo_licenses=["MIT"], count=null, forks_event_min_datetime=null, forks_event_max_datetime=null |
# -*- coding: utf-8 -*-
"""PyInstaller runtime hook"""
import imp
import sys
import pkgutil
_old_find_module = imp.find_module
def _new_find_module(name, *args, **kwargs):
if name in ['lib', 'gui', 'plugins']:
return (None, name, ('', '', 5))
else:
return _old_find_module(name, *args, **kwargs)
imp.find_module = _new_find_module
_old_load_module = imp.load_module
def _new_load_module(name, file, pathname, description):
if pathname in ['lib', 'gui', 'plugins']:
return __import__(name)
else:
return _old_load_module(name, file, pathname, description)
imp.load_module = _new_load_module
PLUGINS_PREFIX = 'electrum_dash_plugins'
KEYSTORE_PLUGINS = [
'hw_wallet',
'digitalbitbox',
'keepkey',
'ledger',
'trezor',
]
OTHER_PLUGINS = [
'audio_modem',
'cosigner_pool',
'email_requests',
'labels',
'virtualkeyboard',
]
OTHER_PLUGINS = map(lambda p: '%s.%s' % (PLUGINS_PREFIX, p), OTHER_PLUGINS)
PLUGINS = KEYSTORE_PLUGINS + OTHER_PLUGINS
class PluginsImporter(object):
def find_module(self, name):
return self
def load_module(self, name):
if name in KEYSTORE_PLUGINS:
return getattr(__import__('%s.%s' % (PLUGINS_PREFIX, name)), name)
elif name in OTHER_PLUGINS:
return getattr(__import__(name), name.split('.')[-1])
elif name.endswith('.qt'):
split = name.split('.')
if split[0] != split[1]:
plugin_module = getattr(__import__(name), split[-2])
return getattr(plugin_module, 'qt')
else:
path = '.'.join(split[1:])
plugin_module = getattr(__import__(path), split[-2])
return getattr(plugin_module, 'qt')
else:
raise Exception('Can not import %s' % name)
_old_find_loader = pkgutil.find_loader
def _new_find_loader(fullname):
if fullname.startswith('%s.' % PLUGINS_PREFIX):
return PluginsImporter()
else:
return _old_find_loader(fullname)
pkgutil.find_loader = _new_find_loader
_old_iter_modules = pkgutil.iter_modules
def _new_iter_modules(path=None, prefix=''):
if path and len(path) == 1 and path[0].endswith(PLUGINS_PREFIX):
for p in PLUGINS:
yield PluginsImporter(), p, True
else:
for loader, name, ispkg in _old_iter_modules(path, prefix):
yield loader, name, ispkg
pkgutil.iter_modules = _new_iter_modules
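# Illustrative only (not part of the original hook): when this module is run
# directly, list the plugin names the custom importer above will claim
# (keystore plugins are bare names, the others carry the PLUGINS_PREFIX).
if __name__ == '__main__':
    for plugin_name in PLUGINS:
        print(plugin_name)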
| avg_line_length: 27.88764 | max_line_length: 78 | alphanum_fraction: 0.637389 |
| hexsha: b8c3b81e718bb43af75247dc3fc71dab2334ae80 | size: 502 | ext: py | lang: Python |
| max_stars: repo_path=strings.py, repo_name=ghanshyam30/python_with_mosh, repo_head_hexsha=f1defd8785fdec494ae9d657406cddd94771bcd6, repo_licenses=["MIT"], count=null, stars_event_min_datetime=null, stars_event_max_datetime=null |
| max_issues: repo_path=strings.py, repo_name=ghanshyam30/python_with_mosh, repo_head_hexsha=f1defd8785fdec494ae9d657406cddd94771bcd6, repo_licenses=["MIT"], count=null, issues_event_min_datetime=null, issues_event_max_datetime=null |
| max_forks: repo_path=strings.py, repo_name=ghanshyam30/python_with_mosh, repo_head_hexsha=f1defd8785fdec494ae9d657406cddd94771bcd6, repo_licenses=["MIT"], count=null, forks_event_min_datetime=null, forks_event_max_datetime=null |
# STRINGS
# We can define strings using single (‘ ‘) or double (“ “) quotes.
course_title = "PYTHON FOR BEGINNERS"
# Accessing individual characters or slices
print("Expected O/P:H \nActual O/P:",course_title[3])
print("Expected O/P:S \nActual O/P:",course_title[-1])
# Slicing
print("Expected O/P:YTH \nActual O/P:",course_title[1:4])
# Slicing if you expect reversed output
print("Expected O/P:OHT \nActual O/P:",course_title[4:1:-1])
print("Expected O/P:SRE \nActual O/P:",course_title[-1:-4:-1])
| avg_line_length: 38.615385 | max_line_length: 67 | alphanum_fraction: 0.711155 |
| hexsha: 83a3c1b76f64fffb124c60a267ee6d47ad1aa433 | size: 4,278 | ext: py | lang: Python |
| max_stars: repo_path=tests/pytests/unit/modules/test_pdbedit.py, repo_name=tomdoherty/salt, repo_head_hexsha=f87d5d7abbf9777773c4d91fdafecb8b1a728e76, repo_licenses=["Apache-2.0"], count=9,425, stars_event_min_datetime=2015-01-01T05:59:24.000Z, stars_event_max_datetime=2022-03-31T20:44:05.000Z |
| max_issues: repo_path=tests/pytests/unit/modules/test_pdbedit.py, repo_name=tomdoherty/salt, repo_head_hexsha=f87d5d7abbf9777773c4d91fdafecb8b1a728e76, repo_licenses=["Apache-2.0"], count=33,507, issues_event_min_datetime=2015-01-01T00:19:56.000Z, issues_event_max_datetime=2022-03-31T23:48:20.000Z |
| max_forks: repo_path=tests/pytests/unit/modules/test_pdbedit.py, repo_name=tomdoherty/salt, repo_head_hexsha=f87d5d7abbf9777773c4d91fdafecb8b1a728e76, repo_licenses=["Apache-2.0"], count=5,810, forks_event_min_datetime=2015-01-01T19:11:45.000Z, forks_event_max_datetime=2022-03-31T02:37:20.000Z |
from textwrap import dedent
import pytest
import salt.modules.pdbedit as pdbedit
from tests.support.mock import MagicMock, patch
@pytest.fixture(autouse=True)
def setup_loader(request):
setup_loader_modules = {pdbedit: {}}
with pytest.helpers.loader_mock(request, setup_loader_modules) as loader_mock:
yield loader_mock
@pytest.mark.parametrize("verbose", [True, False])
def test_when_no_users_returned_no_data_should_be_returned(verbose):
expected_users = {} if verbose else []
with patch.dict(
pdbedit.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"stdout": "", "stderr": "", "retcode": 0}
)
},
):
actual_users = pdbedit.list_users(verbose=verbose)
assert actual_users == expected_users
def test_when_verbose_and_retcode_is_nonzero_output_should_be_had():
expected_stderr = "this is something fnord"
with patch.dict(
pdbedit.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"stdout": "", "stderr": expected_stderr, "retcode": 1}
)
},
), patch("salt.modules.pdbedit.log.error", autospec=True) as fake_error_log:
pdbedit.list_users(verbose=True)
actual_error = fake_error_log.mock_calls[0].args[0]
assert actual_error == expected_stderr
def test_when_verbose_and_single_good_output_expected_data_should_be_parsed():
expected_data = {
"roscivs": {
"unix username": "roscivs",
"nt username": "bottia",
"full name": "Roscivs Bottia",
"user sid": "42",
"primary group sid": "99",
"home directory": r"\\samba\roscivs",
"account desc": "separators! xxx so long and thanks for all the fish",
"logoff time": "Sat, 14 Aug 2010 15:06:39 UTC",
"kickoff time": "Sat, 14 Aug 2010 15:06:39 UTC",
"password must change": "never",
}
}
pdb_output = dedent(
r"""
Unix username: roscivs
NT username: bottia
User SID: 42
Primary Group SID: 99
Full Name: Roscivs Bottia
Home Directory: \\samba\roscivs
Account desc: separators! xxx so long and thanks for all the fish
Logoff time: Sat, 14 Aug 2010 15:06:39 UTC
Kickoff time: Sat, 14 Aug 2010 15:06:39 UTC
Password must change: never
"""
).strip()
with patch.dict(
pdbedit.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"stdout": pdb_output, "stderr": "", "retcode": 0}
)
},
):
actual_data = pdbedit.list_users(verbose=True)
assert actual_data == expected_data
def test_when_verbose_and_multiple_records_present_data_should_be_correctly_parsed():
expected_data = {
"roscivs": {
"unix username": "roscivs",
"nt username": "bottia",
"user sid": "42",
},
"srilyk": {
"unix username": "srilyk",
"nt username": "srilyk",
"account desc": "trololollol",
"user sid": "99",
},
"jewlz": {
"unix username": "jewlz",
"nt username": "flutterbies",
"user sid": "4",
},
}
pdb_output = dedent(
"""
-------------
Unix username: roscivs
NT username: bottia
User SID: 42
-------------
Unix username: srilyk
NT username: srilyk
User SID: 99
Account desc: trololol\x1dlol
-------------
Unix username: jewlz
NT username: flutterbies
User SID: 4
-------------
-------------
-------------
"""
).strip()
with patch.dict(
pdbedit.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"stdout": pdb_output, "stderr": "", "retcode": 0}
)
},
):
actual_data = pdbedit.list_users(verbose=True)
assert actual_data == expected_data
| avg_line_length: 31 | max_line_length: 85 | alphanum_fraction: 0.531089 |
| hexsha: 6ec8fdaff3be970ba79864a32925ed78b62c270f | size: 33,638 | ext: py | lang: Python |
| max_stars: repo_path=pymux/layout.py, repo_name=timgates42/pymux, repo_head_hexsha=3f66e62b9de4b2251c7f9afad6c516dc5a30ec67, repo_licenses=["BSD-3-Clause"], count=1,280, stars_event_min_datetime=2015-04-27T03:59:31.000Z, stars_event_max_datetime=2018-10-25T02:35:00.000Z |
| max_issues: repo_path=pymux/layout.py, repo_name=timgates42/pymux, repo_head_hexsha=3f66e62b9de4b2251c7f9afad6c516dc5a30ec67, repo_licenses=["BSD-3-Clause"], count=71, issues_event_min_datetime=2015-05-19T14:37:53.000Z, issues_event_max_datetime=2018-09-10T09:37:26.000Z |
| max_forks: repo_path=pymux/layout.py, repo_name=timgates42/pymux, repo_head_hexsha=3f66e62b9de4b2251c7f9afad6c516dc5a30ec67, repo_licenses=["BSD-3-Clause"], count=80, forks_event_min_datetime=2016-01-03T18:06:55.000Z, forks_event_max_datetime=2018-08-27T00:57:13.000Z |
# encoding: utf-8
"""
The layout engine. This builds the prompt_toolkit layout.
"""
from __future__ import unicode_literals
from prompt_toolkit.application.current import get_app
from prompt_toolkit.filters import Condition, has_focus
from prompt_toolkit.formatted_text import FormattedText, HTML
from prompt_toolkit.layout.containers import VSplit, HSplit, Window, FloatContainer, Float, ConditionalContainer, Container, WindowAlign, to_container
from prompt_toolkit.layout.controls import BufferControl, FormattedTextControl
from prompt_toolkit.layout.dimension import Dimension
from prompt_toolkit.layout.dimension import Dimension as D
from prompt_toolkit.layout.dimension import to_dimension, is_dimension
from prompt_toolkit.layout.menus import CompletionsMenu
from prompt_toolkit.layout.processors import BeforeInput, ShowArg, AppendAutoSuggestion, Processor, Transformation, HighlightSelectionProcessor
from prompt_toolkit.layout.screen import Char
from prompt_toolkit.mouse_events import MouseEventType
from prompt_toolkit.widgets import FormattedTextToolbar, TextArea, Dialog, SearchToolbar
from six.moves import range
from functools import partial
import pymux.arrangement as arrangement
import datetime
import weakref
import six
from .filters import WaitsForConfirmation
from .format import format_pymux_string
from .log import logger
__all__ = (
'LayoutManager',
)
class Justify:
" Justify enum for the status bar. "
LEFT = 'left'
CENTER = 'center'
RIGHT = 'right'
_ALL = [LEFT, CENTER, RIGHT]
class Z_INDEX:
HIGHLIGHTED_BORDER = 2
STATUS_BAR = 5
COMMAND_LINE = 6
MESSAGE_TOOLBAR = 7
WINDOW_TITLE_BAR = 8
POPUP = 9
class Background(Container):
"""
Generate the background of dots, which becomes visible when several clients
are attached and not all of them have the same size.
(This is implemented as a Container, rather than a UIControl wrapped in a
    Window, because it can be done very efficiently this way.)
"""
def reset(self):
pass
def preferred_width(self, max_available_width):
return D()
def preferred_height(self, width, max_available_height):
return D()
def write_to_screen(self, screen, mouse_handlers, write_position,
parent_style, erase_bg, z_index):
" Fill the whole area of write_position with dots. "
default_char = Char(' ', 'class:background')
dot = Char('.', 'class:background')
ypos = write_position.ypos
xpos = write_position.xpos
for y in range(ypos, ypos + write_position.height):
row = screen.data_buffer[y]
for x in range(xpos, xpos + write_position.width):
row[x] = dot if (x + y) % 3 == 0 else default_char
def get_children(self):
return []
# Numbers for the clock and pane numbering.
_numbers = list(zip(*[ # (Transpose x/y.)
['#####', ' #', '#####', '#####', '# #', '#####', '#####', '#####', '#####', '#####'],
['# #', ' #', ' #', ' #', '# #', '# ', '# ', ' #', '# #', '# #'],
['# #', ' #', '#####', '#####', '#####', '#####', '#####', ' #', '#####', '#####'],
['# #', ' #', '# ', ' #', ' #', ' #', '# #', ' #', '# #', ' #'],
['#####', ' #', '#####', '#####', ' #', '#####', '#####', ' #', '#####', '#####'],
]))
def _draw_number(screen, x_offset, y_offset, number, style='class:clock',
transparent=False):
" Write number at position. "
fg = Char(' ', 'class:clock')
bg = Char(' ', '')
for y, row in enumerate(_numbers[number]):
screen_row = screen.data_buffer[y + y_offset]
for x, n in enumerate(row):
if n == '#':
screen_row[x + x_offset] = fg
elif not transparent:
screen_row[x + x_offset] = bg
class BigClock(Container):
"""
Display a big clock.
"""
WIDTH = 28
HEIGHT = 5
def __init__(self, on_click):
assert callable(on_click)
self.on_click = on_click
def reset(self):
pass
def write_to_screen(self, screen, mouse_handlers, write_position,
parent_style, erase_bg, z_index):
xpos = write_position.xpos
ypos = write_position.ypos
# Erase background.
bg = Char(' ', '')
def draw_func():
for y in range(ypos, self.HEIGHT + ypos):
row = screen.data_buffer[y]
for x in range(xpos, xpos + self.WIDTH):
row[x] = bg
# Display time.
now = datetime.datetime.now()
_draw_number(screen, xpos + 0, ypos, now.hour // 10)
_draw_number(screen, xpos + 6, ypos, now.hour % 10)
_draw_number(screen, xpos + 16, ypos, now.minute // 10)
_draw_number(screen, xpos + 23, ypos, now.minute % 10)
# Add a colon
screen.data_buffer[ypos + 1][xpos + 13] = Char(' ', 'class:clock')
screen.data_buffer[ypos + 3][xpos + 13] = Char(' ', 'class:clock')
screen.width = self.WIDTH
screen.height = self.HEIGHT
mouse_handlers.set_mouse_handler_for_range(
x_min=xpos,
x_max=xpos + write_position.width,
y_min=ypos,
y_max=ypos + write_position.height,
handler=self._mouse_handler)
screen.draw_with_z_index(z_index=z_index, draw_func=draw_func)
def _mouse_handler(self, cli, mouse_event):
" Click callback. "
if mouse_event.event_type == MouseEventType.MOUSE_UP:
self.on_click(cli)
else:
return NotImplemented
def preferred_width(self, max_available_width):
return D.exact(BigClock.WIDTH)
def preferred_height(self, width, max_available_height):
return D.exact(BigClock.HEIGHT)
def get_children(self):
return []
class PaneNumber(Container): # XXX: make FormattedTextControl
"""
Number of panes, to be drawn in the middle of the pane.
"""
WIDTH = 5
HEIGHT = 5
def __init__(self, pymux, arrangement_pane):
self.pymux = pymux
self.arrangement_pane = arrangement_pane
def reset(self):
pass
def _get_index(self):
window = self.pymux.arrangement.get_active_window()
try:
return window.get_pane_index(self.arrangement_pane)
except ValueError:
return 0
def preferred_width(self, max_available_width):
# Enough to display all the digits.
return Dimension.exact(6 * len('%s' % self._get_index()) - 1)
def preferred_height(self, width, max_available_height):
return Dimension.exact(self.HEIGHT)
def write_to_screen(self, screen, mouse_handlers, write_position,
parent_style, erase_bg, z_index):
style = 'class:panenumber'
def draw_func():
for i, d in enumerate('%s' % (self._get_index(),)):
_draw_number(screen, write_position.xpos + i * 6, write_position.ypos,
int(d), style=style, transparent=True)
screen.draw_with_z_index(z_index=z_index, draw_func=draw_func)
def get_children(self):
return []
class MessageToolbar(FormattedTextToolbar):
"""
Pop-up (at the bottom) for showing error/status messages.
"""
def __init__(self, client_state):
def get_message():
# If there is a message to be shown for this client, show that.
if client_state.message:
return client_state.message
else:
return ''
def get_tokens():
message = get_message()
if message:
return FormattedText([
('class:message', message),
('[SetCursorPosition]', ''),
('class:message', ' '),
])
else:
return ''
@Condition
def is_visible():
return bool(get_message())
super(MessageToolbar, self).__init__(get_tokens)
class LayoutManager(object):
"""
The main layout class, that contains the whole Pymux layout.
"""
def __init__(self, pymux, client_state):
self.pymux = pymux
self.client_state = client_state
# Popup dialog for displaying keys, etc...
search_textarea = SearchToolbar()
self._popup_textarea = TextArea(scrollbar=True, read_only=True, search_field=search_textarea)
self.popup_dialog = Dialog(
title='Keys',
body=HSplit([
Window(FormattedTextControl(text=''), height=1), # 1 line margin.
self._popup_textarea,
search_textarea,
Window(
FormattedTextControl(
text=HTML('Press [<b>q</b>] to quit or [<b>/</b>] for searching.')),
align=WindowAlign.CENTER,
height=1)
])
)
self.layout = self._create_layout()
# Keep track of render information.
self.pane_write_positions = {}
def reset_write_positions(self):
"""
Clear write positions right before rendering. (They are populated
during rendering).
"""
self.pane_write_positions = {}
def display_popup(self, title, content):
"""
Display a pop-up dialog.
"""
assert isinstance(title, six.text_type)
assert isinstance(content, six.text_type)
self.popup_dialog.title = title
self._popup_textarea.text = content
self.client_state.display_popup = True
get_app().layout.focus(self._popup_textarea)
def _create_select_window_handler(self, window):
" Return a mouse handler that selects the given window when clicking. "
def handler(mouse_event):
if mouse_event.event_type == MouseEventType.MOUSE_DOWN:
self.pymux.arrangement.set_active_window(window)
self.pymux.invalidate()
else:
return NotImplemented # Event not handled here.
return handler
def _get_status_tokens(self):
" The tokens for the status bar. "
result = []
# Display panes.
for i, w in enumerate(self.pymux.arrangement.windows):
if i > 0:
result.append(('', ' '))
if w == self.pymux.arrangement.get_active_window():
style = 'class:window.current'
format_str = self.pymux.window_status_current_format
else:
style = 'class:window'
format_str = self.pymux.window_status_format
result.append((
style,
format_pymux_string(self.pymux, format_str, window=w),
self._create_select_window_handler(w)))
return result
def _get_status_left_tokens(self):
return format_pymux_string(self.pymux, self.pymux.status_left)
def _get_status_right_tokens(self):
return format_pymux_string(self.pymux, self.pymux.status_right)
def _get_align(self):
if self.pymux.status_justify == Justify.RIGHT:
return WindowAlign.RIGHT
elif self.pymux.status_justify == Justify.CENTER:
return WindowAlign.CENTER
else:
return WindowAlign.LEFT
def _before_prompt_command_tokens(self):
return [('class:commandline.prompt', '%s ' % (self.client_state.prompt_text, ))]
def _create_layout(self):
"""
Generate the main prompt_toolkit layout.
"""
waits_for_confirmation = WaitsForConfirmation(self.pymux)
return FloatContainer(
content=HSplit([
# The main window.
FloatContainer(
Background(),
floats=[
Float(width=lambda: self.pymux.get_window_size().columns,
height=lambda: self.pymux.get_window_size().rows,
content=DynamicBody(self.pymux))
]),
# Status bar.
ConditionalContainer(
content=VSplit([
# Left.
Window(
height=1,
width=(lambda: D(max=self.pymux.status_left_length)),
dont_extend_width=True,
content=FormattedTextControl(self._get_status_left_tokens)),
# List of windows in the middle.
Window(
height=1,
char=' ',
align=self._get_align,
content=FormattedTextControl(self._get_status_tokens)),
# Right.
Window(
height=1,
width=(lambda: D(max=self.pymux.status_right_length)),
dont_extend_width=True,
align=WindowAlign.RIGHT,
content=FormattedTextControl(self._get_status_right_tokens))
], z_index=Z_INDEX.STATUS_BAR, style='class:statusbar'),
filter=Condition(lambda: self.pymux.enable_status),
)
]),
floats=[
Float(bottom=1, left=0, z_index=Z_INDEX.MESSAGE_TOOLBAR,
content=MessageToolbar(self.client_state)),
Float(left=0, right=0, bottom=0, content=HSplit([
# Wait for confirmation toolbar.
ConditionalContainer(
content=Window(
height=1,
content=ConfirmationToolbar(self.pymux, self.client_state),
z_index=Z_INDEX.COMMAND_LINE,
),
filter=waits_for_confirmation,
),
# ':' prompt toolbar.
ConditionalContainer(
content=Window(
height=D(min=1), # Can be more if the command is multiline.
style='class:commandline',
dont_extend_height=True,
content=BufferControl(
buffer=self.client_state.command_buffer,
preview_search=True,
input_processors=[
AppendAutoSuggestion(),
BeforeInput(':', style='class:commandline-prompt'),
ShowArg(),
HighlightSelectionProcessor(),
]),
z_index=Z_INDEX.COMMAND_LINE,
),
filter=has_focus(self.client_state.command_buffer),
),
# Other command-prompt commands toolbar.
ConditionalContainer(
content=Window(
height=1,
style='class:commandline',
content=BufferControl(
buffer=self.client_state.prompt_buffer,
input_processors=[
BeforeInput(self._before_prompt_command_tokens),
AppendAutoSuggestion(),
HighlightSelectionProcessor(),
]),
z_index=Z_INDEX.COMMAND_LINE,
),
filter=has_focus(self.client_state.prompt_buffer),
),
])),
# Keys pop-up.
Float(
content=ConditionalContainer(
content=self.popup_dialog,
filter=Condition(lambda: self.client_state.display_popup),
),
left=3, right=3, top=5, bottom=5,
z_index=Z_INDEX.POPUP,
),
Float(xcursor=True, ycursor=True, content=CompletionsMenu(max_height=12)),
]
)
class ConfirmationToolbar(FormattedTextControl):
"""
Window that displays the yes/no confirmation dialog.
"""
def __init__(self, pymux, client_state):
def get_tokens():
return [
('class:question', ' '),
('class:question', format_pymux_string(
pymux, client_state.confirm_text or '')),
('class:question', ' '),
('class:yesno', ' y/n'),
('[SetCursorPosition]', ''),
('class:yesno', ' '),
]
super(ConfirmationToolbar, self).__init__(
get_tokens, style='class:confirmationtoolbar')
class DynamicBody(Container):
"""
The dynamic part, which is different for each CLI (for each client). It
depends on which window/pane is active.
This makes it possible to have just one main layout class, and
automatically rebuild the parts that change if the windows/panes
arrangement changes, without doing any synchronisation.
"""
def __init__(self, pymux):
self.pymux = pymux
self._bodies_for_app = weakref.WeakKeyDictionary() # Maps Application to (hash, Container)
def _get_body(self):
" Return the Container object for the current CLI. "
new_hash = self.pymux.arrangement.invalidation_hash()
# Return existing layout if nothing has changed to the arrangement.
app = get_app()
if app in self._bodies_for_app:
existing_hash, container = self._bodies_for_app[app]
if existing_hash == new_hash:
return container
# The layout changed. Build a new layout when the arrangement changed.
new_layout = self._build_layout()
self._bodies_for_app[app] = (new_hash, new_layout)
return new_layout
def _build_layout(self):
" Rebuild a new Container object and return that. "
logger.info('Rebuilding layout.')
if not self.pymux.arrangement.windows:
# No Pymux windows in the arrangement.
return Window()
active_window = self.pymux.arrangement.get_active_window()
# When zoomed, only show the current pane, otherwise show all of them.
if active_window.zoom:
return to_container(_create_container_for_process(
self.pymux, active_window, active_window.active_pane, zoom=True))
else:
window = self.pymux.arrangement.get_active_window()
return HSplit([
# Some spacing for the top status bar.
ConditionalContainer(
content=Window(height=1),
filter=Condition(lambda: self.pymux.enable_pane_status)),
# The actual content.
_create_split(self.pymux, window, window.root)
])
def reset(self):
for invalidation_hash, body in self._bodies_for_app.values():
body.reset()
def preferred_width(self, max_available_width):
body = self._get_body()
return body.preferred_width(max_available_width)
def preferred_height(self, width, max_available_height):
body = self._get_body()
return body.preferred_height(width, max_available_height)
def write_to_screen(self, screen, mouse_handlers, write_position,
parent_style, erase_bg, z_index):
body = self._get_body()
body.write_to_screen(screen, mouse_handlers, write_position,
parent_style, erase_bg, z_index)
def get_children(self):
# (Required for prompt_toolkit.layout.utils.find_window_for_buffer_name.)
body = self._get_body()
return [body]
class SizedBox(Container):
"""
    Container which enforces a given width/height without taking the children
into account (even if no width/height is given).
:param content: `Container`.
:param report_write_position_callback: `None` or a callable for reporting
back the dimensions used while drawing.
"""
def __init__(self, content, width=None, height=None,
report_write_position_callback=None):
assert is_dimension(width)
assert is_dimension(height)
assert report_write_position_callback is None or callable(report_write_position_callback)
self.content = to_container(content)
self.width = width
self.height = height
self.report_write_position_callback = report_write_position_callback
def reset(self):
self.content.reset()
def preferred_width(self, max_available_width):
return to_dimension(self.width)
def preferred_height(self, width, max_available_height):
return to_dimension(self.height)
def write_to_screen(self, screen, mouse_handlers, write_position,
parent_style, erase_bg, z_index):
# Report dimensions.
if self.report_write_position_callback:
self.report_write_position_callback(write_position)
self.content.write_to_screen(
screen, mouse_handlers, write_position, parent_style, erase_bg, z_index)
def get_children(self):
return [self.content]
def _create_split(pymux, window, split):
"""
Create a prompt_toolkit `Container` instance for the given pymux split.
"""
assert isinstance(split, (arrangement.HSplit, arrangement.VSplit))
is_vsplit = isinstance(split, arrangement.VSplit)
def get_average_weight():
""" Calculate average weight of the children. Return 1 if none of
the children has a weight specified yet. """
weights = 0
count = 0
for i in split:
if i in split.weights:
weights += split.weights[i]
count += 1
if weights:
return max(1, weights // count)
else:
return 1
def report_write_position_callback(item, write_position):
"""
        When the layout is rendered, store the actual dimensions as
        weights in the arrangement.VSplit/HSplit classes.
        This is required because when a pane is resized with an increase of +1,
        we want to be sure that this corresponds exactly with one row or
        column, so that updating weights corresponds exactly 1:1 to updating
        the size of the panes.
"""
if is_vsplit:
split.weights[item] = write_position.width
else:
split.weights[item] = write_position.height
def get_size(item):
return D(weight=split.weights.get(item) or average_weight)
content = []
average_weight = get_average_weight()
for i, item in enumerate(split):
# Create function for calculating dimensions for child.
width = height = None
if is_vsplit:
width = partial(get_size, item)
else:
height = partial(get_size, item)
# Create child.
if isinstance(item, (arrangement.VSplit, arrangement.HSplit)):
child = _create_split(pymux, window, item)
elif isinstance(item, arrangement.Pane):
child = _create_container_for_process(pymux, window, item)
else:
raise TypeError('Got %r' % (item,))
# Wrap child in `SizedBox` to enforce dimensions and sync back.
content.append(SizedBox(
child, width=width, height=height,
report_write_position_callback=partial(report_write_position_callback, item)))
# Create prompt_toolkit Container.
if is_vsplit:
return_cls = VSplit
padding_char = _border_vertical
else:
return_cls = HSplit
padding_char = _border_horizontal
return return_cls(content,
padding=1,
padding_char=padding_char)
class _UseCopyTokenListProcessor(Processor):
"""
In order to allow highlighting of the copy region, we use a preprocessed
list of (Token, text) tuples. This processor returns just that list for the
given pane.
"""
def __init__(self, arrangement_pane):
self.arrangement_pane = arrangement_pane
def apply_transformation(self, document, lineno, source_to_display, tokens):
tokens = self.arrangement_pane.copy_get_tokens_for_line(lineno)
return Transformation(tokens[:])
def invalidation_hash(self, document):
return document.text
def _create_container_for_process(pymux, window, arrangement_pane, zoom=False):
"""
Create a `Container` with a titlebar for a process.
"""
@Condition
def clock_is_visible():
return arrangement_pane.clock_mode
@Condition
def pane_numbers_are_visible():
return pymux.display_pane_numbers
terminal_is_focused = has_focus(arrangement_pane.terminal)
def get_terminal_style():
if terminal_is_focused():
result = 'class:terminal.focused'
else:
result = 'class:terminal'
return result
def get_titlebar_text_fragments():
result = []
if zoom:
result.append(('class:titlebar-zoom', ' Z '))
if arrangement_pane.process.is_terminated:
result.append(('class:terminated', ' Terminated '))
# Scroll buffer info.
if arrangement_pane.display_scroll_buffer:
result.append(('class:copymode', ' %s ' % arrangement_pane.scroll_buffer_title))
# Cursor position.
document = arrangement_pane.scroll_buffer.document
result.append(('class:copymode.position', ' %i,%i ' % (
document.cursor_position_row, document.cursor_position_col)))
if arrangement_pane.name:
result.append(('class:name', ' %s ' % arrangement_pane.name))
result.append(('', ' '))
return result + [
('', format_pymux_string(pymux, ' #T ', pane=arrangement_pane)) # XXX: Make configurable.
]
def get_pane_index():
try:
w = pymux.arrangement.get_active_window()
index = w.get_pane_index(arrangement_pane)
except ValueError:
index = '/'
return '%3s ' % index
def on_click():
" Click handler for the clock. When clicked, select this pane. "
arrangement_pane.clock_mode = False
pymux.arrangement.get_active_window().active_pane = arrangement_pane
pymux.invalidate()
return HighlightBordersIfActive(
window,
arrangement_pane,
get_terminal_style,
FloatContainer(
HSplit([
# The terminal.
TracePaneWritePosition(
pymux, arrangement_pane,
content=arrangement_pane.terminal),
]),
#
floats=[
# The title bar.
Float(content=
ConditionalContainer(
content=VSplit([
Window(
height=1,
content=FormattedTextControl(
get_titlebar_text_fragments)),
Window(
height=1,
width=4,
content=FormattedTextControl(get_pane_index),
style='class:paneindex')
], style='class:titlebar'),
filter=Condition(lambda: pymux.enable_pane_status)),
left=0, right=0, top=-1, height=1, z_index=Z_INDEX.WINDOW_TITLE_BAR),
# The clock.
Float(
content=ConditionalContainer(BigClock(on_click),
filter=clock_is_visible)),
# Pane number.
Float(content=ConditionalContainer(
content=PaneNumber(pymux, arrangement_pane),
filter=pane_numbers_are_visible)),
]
)
)
class _ContainerProxy(Container):
def __init__(self, content):
self.content = content
def reset(self):
self.content.reset()
def preferred_width(self, max_available_width):
return self.content.preferred_width(max_available_width)
def preferred_height(self, width, max_available_height):
return self.content.preferred_height(width, max_available_height)
def write_to_screen(self, screen, mouse_handlers, write_position, parent_style, erase_bg, z_index):
self.content.write_to_screen(screen, mouse_handlers, write_position, parent_style, erase_bg, z_index)
def get_children(self):
return [self.content]
_focused_border_titlebar = '┃'
_focused_border_vertical = '┃'
_focused_border_horizontal = '━'
_focused_border_left_top = '┏'
_focused_border_right_top = '┓'
_focused_border_left_bottom = '┗'
_focused_border_right_bottom = '┛'
_border_vertical = '│'
_border_horizontal = '─'
_border_left_bottom = '└'
_border_right_bottom = '┘'
_border_left_top = '┌'
_border_right_top = '┐'
class HighlightBordersIfActive(object):
"""
Put borders around this control if active.
"""
def __init__(self, window, pane, style, content):
@Condition
def is_selected():
return window.active_pane == pane
def conditional_float(char, left=None, right=None, top=None,
bottom=None, width=None, height=None):
return Float(
content=ConditionalContainer(
Window(char=char, style='class:border'),
filter=is_selected),
left=left, right=right, top=top, bottom=bottom, width=width, height=height,
z_index=Z_INDEX.HIGHLIGHTED_BORDER)
self.container = FloatContainer(
content,
style=style,
floats=[
# Sides.
conditional_float(_focused_border_vertical, left=-1, top=0, bottom=0, width=1),
conditional_float(_focused_border_vertical, right=-1, top=0, bottom=0, width=1),
conditional_float(_focused_border_horizontal, left=0, right=0, top=-1, height=1),
conditional_float(_focused_border_horizontal, left=0, right=0, bottom=-1, height=1),
# Corners.
conditional_float(_focused_border_left_top, left=-1, top=-1, width=1, height=1),
conditional_float(_focused_border_right_top, right=-1, top=-1, width=1, height=1),
conditional_float(_focused_border_left_bottom, left=-1, bottom=-1, width=1, height=1),
conditional_float(_focused_border_right_bottom, right=-1, bottom=-1, width=1, height=1),
])
def __pt_container__(self):
return self.container
class TracePaneWritePosition(_ContainerProxy): # XXX: replace with SizedBox
" Trace the write position of this pane. "
def __init__(self, pymux, arrangement_pane, content):
content = to_container(content)
_ContainerProxy.__init__(self, content)
self.pymux = pymux
self.arrangement_pane = arrangement_pane
    def write_to_screen(self, screen, mouse_handlers, write_position, parent_style, erase_bg, z_index):
        _ContainerProxy.write_to_screen(self, screen, mouse_handlers, write_position, parent_style, erase_bg, z_index)
self.pymux.get_client_state().layout_manager.pane_write_positions[self.arrangement_pane] = write_position
def focus_left(pymux):
" Move focus to the left. "
_move_focus(pymux,
lambda wp: wp.xpos - 2, # 2 in order to skip over the border.
lambda wp: wp.ypos)
def focus_right(pymux):
" Move focus to the right. "
_move_focus(pymux,
lambda wp: wp.xpos + wp.width + 1,
lambda wp: wp.ypos)
def focus_down(pymux):
" Move focus down. "
_move_focus(pymux,
lambda wp: wp.xpos,
lambda wp: wp.ypos + wp.height + 2)
# 2 in order to skip over the border. Only required when the
# pane-status is not shown, but a border instead.
def focus_up(pymux):
" Move focus up. "
_move_focus(pymux,
lambda wp: wp.xpos,
lambda wp: wp.ypos - 2)
def _move_focus(pymux, get_x, get_y):
" Move focus of the active window. "
window = pymux.arrangement.get_active_window()
try:
write_pos = pymux.get_client_state().layout_manager.pane_write_positions[window.active_pane]
except KeyError:
pass
else:
x = get_x(write_pos)
y = get_y(write_pos)
# Look for the pane at this position.
for pane, wp in pymux.get_client_state().layout_manager.pane_write_positions.items():
if (wp.xpos <= x < wp.xpos + wp.width and
wp.ypos <= y < wp.ypos + wp.height):
window.active_pane = pane
return
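# Illustrative only (not part of pymux): when run directly, print one digit of
# the 5x5 bitmap font defined in _numbers above, the same table BigClock and
# PaneNumber draw from.
if __name__ == '__main__':
    for bitmap_row in _numbers[3]:
        print(bitmap_row)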
| avg_line_length: 35.633475 | max_line_length: 150 | alphanum_fraction: 0.579969 |
| hexsha: 7cfd4c2dae80b92585f726933df87d32bcaeba19 | size: 292 | ext: py | lang: Python |
| max_stars: repo_path=output/models/nist_data/atomic/g_year_month/schema_instance/nistschema_sv_iv_atomic_g_year_month_max_inclusive_2_xsd/__init__.py, repo_name=tefra/xsdata-w3c-tests, repo_head_hexsha=b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f, repo_licenses=["MIT"], count=1, stars_event_min_datetime=2021-08-14T17:59:21.000Z, stars_event_max_datetime=2021-08-14T17:59:21.000Z |
| max_issues: repo_path=output/models/nist_data/atomic/g_year_month/schema_instance/nistschema_sv_iv_atomic_g_year_month_max_inclusive_2_xsd/__init__.py, repo_name=tefra/xsdata-w3c-tests, repo_head_hexsha=b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f, repo_licenses=["MIT"], count=4, issues_event_min_datetime=2020-02-12T21:30:44.000Z, issues_event_max_datetime=2020-04-15T20:06:46.000Z |
| max_forks: repo_path=output/models/nist_data/atomic/g_year_month/schema_instance/nistschema_sv_iv_atomic_g_year_month_max_inclusive_2_xsd/__init__.py, repo_name=tefra/xsdata-w3c-tests, repo_head_hexsha=b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f, repo_licenses=["MIT"], count=null, forks_event_min_datetime=null, forks_event_max_datetime=null |
from output.models.nist_data.atomic.g_year_month.schema_instance.nistschema_sv_iv_atomic_g_year_month_max_inclusive_2_xsd.nistschema_sv_iv_atomic_g_year_month_max_inclusive_2 import NistschemaSvIvAtomicGYearMonthMaxInclusive2
__all__ = [
"NistschemaSvIvAtomicGYearMonthMaxInclusive2",
]
| avg_line_length: 48.666667 | max_line_length: 225 | alphanum_fraction: 0.907534 |
| hexsha: d61d032636d74646bfadfaf8bd4b180cb4f990f0 | size: 1,898 | ext: py | lang: Python |
| max_stars: repo_path=nuvem-parallel/nuvem/lookup.py, repo_name=isabella232/nuvem, repo_head_hexsha=b92a21dc6d902845810a67711e25a3f6fa9fb9f3, repo_licenses=["Apache-2.0"], count=4, stars_event_min_datetime=2016-03-11T21:13:57.000Z, stars_event_max_datetime=2021-11-10T13:38:53.000Z |
| max_issues: repo_path=nuvem-parallel/nuvem/lookup.py, repo_name=apache/nuvem, repo_head_hexsha=b92a21dc6d902845810a67711e25a3f6fa9fb9f3, repo_licenses=["Apache-2.0"], count=1, issues_event_min_datetime=2021-11-04T13:04:16.000Z, issues_event_max_datetime=2021-11-04T13:04:16.000Z |
| max_forks: repo_path=nuvem-parallel/nuvem/lookup.py, repo_name=isabella232/nuvem, repo_head_hexsha=b92a21dc6d902845810a67711e25a3f6fa9fb9f3, repo_licenses=["Apache-2.0"], count=9, forks_event_min_datetime=2015-12-05T21:09:42.000Z, forks_event_max_datetime=2021-11-10T13:35:50.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Return a list of name value pairs that match a name
def get(r, n, l):
def isList(v):
if getattr(v, '__iter__', False) == False:
return False
if isinstance(v, basestring) or isinstance(v, dict):
return False
return True
def isAssoc(v):
return isList(v) and len(v) == 2 and isinstance(v[0], basestring) and v[0][0:1] == "'"
def lookup(nv, lv):
if lv == ():
return ()
# Check if list element is a name value pair assoc
a = lv[0]
if not isAssoc(a):
return lookup(nv, lv[1:])
# Got a match, return it and lookup rest of the list
an = "'" + a[0][2:] if a[0][0:2] == "'@" else a[0]
if an == nv:
return (a,) + lookup(nv, lv[1:])
# No match, lookup rest of the list
return lookup(nv, lv[1:])
def qsymbol(x):
if not isinstance(x, basestring):
return x
return x if x[0:1] == "'" else "'" + x
nv = n.get(r)
lv = l.get(r)
return lookup(qsymbol(nv), () if lv is None else lv)
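# Illustrative note (not part of the original module): an "assoc" here is a
# two-element sequence whose first element is a quoted symbol, for example
# ("'name", "joe"). get() quotes the requested name if needed, strips the "@"
# from "'@..." keys, and returns every matching assoc from the list in order.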
| avg_line_length: 33.892857 | max_line_length: 94 | alphanum_fraction: 0.612223 |
| hexsha: bdbac9caac2368234fa6f9c4434329f6a00d0316 | size: 10,610 | ext: py | lang: Python |
| max_stars: repo_path=configs/models/lenet.py, repo_name=jamesoneill12/LayerFusion, repo_head_hexsha=99cba1030ed8c012a453bc7715830fc99fb980dc, repo_licenses=["Apache-2.0"], count=null, stars_event_min_datetime=null, stars_event_max_datetime=null |
| max_issues: repo_path=configs/models/lenet.py, repo_name=jamesoneill12/LayerFusion, repo_head_hexsha=99cba1030ed8c012a453bc7715830fc99fb980dc, repo_licenses=["Apache-2.0"], count=null, issues_event_min_datetime=null, issues_event_max_datetime=null |
| max_forks: repo_path=configs/models/lenet.py, repo_name=jamesoneill12/LayerFusion, repo_head_hexsha=99cba1030ed8c012a453bc7715830fc99fb980dc, repo_licenses=["Apache-2.0"], count=null, forks_event_min_datetime=null, forks_event_max_datetime=null |
import argparse
import os, sys
import shutil
import time
import math
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from datetime import timedelta
import logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format="%(asctime)s %(message)s", datefmt="%m-%d %H:%M")
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
optim_names = sorted(name for name in optim.__dict__
if callable(optim.__dict__[name]))
def parse_args():
parser = argparse.ArgumentParser(description='Implementation of iterative pruning in the paper: '
'Learning both Weights and Connections for Efficient Neural Networks')
parser.add_argument('--data', '-d', metavar='DIR',
help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names))
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('-o', '--optimizer', default='SGD', metavar='O',
choices=optim_names,
help='optimizers: ' + ' | '.join(optim_names) +
' (default: SGD)')
parser.add_argument('-m', '--max_epochs', default=5, type=int,
metavar='E',
help='max number of epochs while training')
parser.add_argument('-c', '--interval', default=5, type=int,
metavar='I',
help='checkpointing interval')
parser.add_argument('--lr', '--learning-rate', default=0.0001, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight_decay', '--wd', default=0.005, type=float,
metavar='W', help='weight decay')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('-t', '--topk', default=[1,5],
metavar='T',
nargs='+', type=int,
help='Top k precision metrics')
parser.add_argument('--cuda', action='store_true')
return parser.parse_args()
def adjust_learning_rate(optimizer, lr, verbose=False):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
# lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if verbose:
print(optimizer.param_groups)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def validate(model, dataloader, topk, cuda=False):
'''
validate the model on a given dataset
:param
model: specify the model to be validated
dataloader: a loader for the dataset to be validated on
topk: a list that specifies which top k scores we want
cuda: whether cuda is used
:return:
all the top k precision scores
'''
scores = [AverageMeter() for _ in topk]
# switch to evaluate mode
model.eval()
start = time.time()
print('Validating ', end='', flush=True)
for i, (input, target) in enumerate(dataloader):
if cuda:
input = input.cuda()
            target = target.cuda(non_blocking=True)  # 'async' is a reserved word in Python 3.7+
# input_var = Variable(input, volatile=True)
# target_var = Variable(target, volatile=True)
# compute output
# output = model(input_var)
output = model(input)
# measure accuracy
precisions = accuracy(output.data, target, topk=topk)
# top1.update(prec1[0], input.size(0))
# top5.update(prec5[0], input.size(0))
        # use a separate loop index so the batch counter i is not clobbered
        for k, s in enumerate(scores):
            s.update(precisions[k][0], input.size(0))
if i % 20 == 0:
print('.', end='', flush=True)
time_elapse = time.time() - start
print('\ninference time:', str(timedelta(seconds=time_elapse)))
# print(' * Prec@1 {top1.avg:.3f}% Prec@5 {top5.avg:.3f}%'
# .format(top1=top1, top5=top5))
ret = list(map(lambda x:x.avg, scores))
string = ' '.join(['Prec@%d: %.3f%%' % (topk[i], a) for i, a in enumerate(ret)])
print(' *', string)
# return top1.avg, top5.avg
return ret
def save_checkpoint(state, filename='checkpoint.pth.tar', dir=None, is_best=False):
if dir is not None and not os.path.exists(dir):
os.makedirs(dir)
filename = filename if dir is None else os.path.join(dir, filename)
torch.save(state, filename)
if is_best:
bestname = 'model_best.pth.tar'
if dir is not None:
bestname = os.path.join(dir, bestname)
shutil.copyfile(filename, bestname)
def load_checkpoint(filename='checkpoint.pth.tar', dir=None):
assert dir is None or os.path.exists(dir)
if dir:
filename = os.path.join(dir, filename)
return torch.load(filename)
def get_loaders(args):
batch_size = args.batch_size
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
testdir = os.path.join(args.data, 'test')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transform = transforms.Compose([
transforms.RandomSizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
trainset = datasets.ImageFolder(
traindir,
transform
)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transform),
batch_size=batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
test_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(testdir, transform),
batch_size=batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
return train_loader, val_loader, test_loader
def get_mnist_loaders(args):
batch_size = args.batch_size
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
trainset = datasets.MNIST(root='D:/data/mnist', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=1)
testset = datasets.MNIST(root='D:/data/mnist', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=1)
return trainloader, testloader, testloader
def get_svhn_loaders(args):
batch_size = args.batch_size
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = datasets.SVHN(root='D:/data/svhn', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=1)
testset = datasets.SVHN(root='D:/data/svhn', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=1)
return trainloader, testloader, testloader
def get_fashion_loaders(args):
batch_size = args.batch_size
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = datasets.FashionMNIST(root='D:/data/fashion-mnist', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=1)
testset = datasets.FashionMNIST(root='D:/data/fashion-mnist', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=1)
return trainloader, testloader, testloader
def get_cifar10_loaders(args):
batch_size = args.batch_size
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = datasets.CIFAR10(root='D:/data/cifar10', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=1)
testset = datasets.CIFAR10(root='D:/data/cifar10', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=1)
return trainloader, testloader, testloader
def converged(old, new):
converge = True
for old_score, new_score in zip(old, new):
converge = converge and abs(old_score - new_score) < 0.001
return converge
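# --- illustrative usage sketch (added for clarity; not part of the original file) ---
# Shows roughly how the helpers above could be wired together. The fallback
# architecture 'resnet18', the 'checkpoints' directory name and the assumption of
# an ImageNet-style --data layout (train/ val/ test/) are placeholders, not taken
# from the source repository.
if __name__ == '__main__':
    args = parse_args()
    arch = args.arch or 'resnet18'  # parse_args allows an empty default
    model = models.__dict__[arch](pretrained=args.pretrained)
    if args.cuda:
        model = model.cuda()
    train_loader, val_loader, test_loader = get_loaders(args)
    # the default optimizer is SGD, which accepts momentum and weight decay
    optimizer = optim.__dict__[args.optimizer](model.parameters(), lr=args.lr,
                                               momentum=args.momentum,
                                               weight_decay=args.weight_decay)
    adjust_learning_rate(optimizer, args.lr)
    scores = validate(model, val_loader, topk=tuple(args.topk), cuda=args.cuda)
    save_checkpoint({'arch': arch, 'state_dict': model.state_dict()},
                    dir='checkpoints')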
| 39.007353
| 119
| 0.637795
|
864322c2a8a04a0ba3d9f7c9146cb233ef0a9d15
| 1,531
|
py
|
Python
|
tests/core/test_bitops.py
|
greysonDEV/SHA-256
|
512de78a7261728316f79de5be2ca8b6eddebbb6
|
[
"MIT"
] | null | null | null |
tests/core/test_bitops.py
|
greysonDEV/SHA-256
|
512de78a7261728316f79de5be2ca8b6eddebbb6
|
[
"MIT"
] | null | null | null |
tests/core/test_bitops.py
|
greysonDEV/SHA-256
|
512de78a7261728316f79de5be2ca8b6eddebbb6
|
[
"MIT"
] | null | null | null |
from sha256.core.bitops import binary, prepad, add, twos
def test_binary():
result = binary(0)
expected = [0]
assert result == expected
result = binary(10)
expected = [1,0,1,0]
assert result == expected
result = binary(528193)
expected = [1,0,0,0,0,0,0,0,1,1,1,1,0,1,0,0,0,0,0,1]
assert result == expected
def test_prepad():
result = prepad([0,0,1,1], to=0)
expected = [0,0,1,1]
assert result == expected
result = prepad([1,1,1,1], to=8)
expected = [0,0,0,0,1,1,1,1]
assert result == expected
result = prepad([0,0,0,0,1,1,1,1], to=32)
expected = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1]
assert result == expected
def test_add():
result = add([1,0,1,0,1,0,1,0], [1,1,0,0,1,1,0,0])
expected = [1,0,1,1,1,0,1,1,0]
assert result == expected
result = add([1,0,0,1,1,1,0,0,1], [1,1,1,0,0,0,1,1,0])
expected = [1,0,1,1,1,1,1,1,1,1]
assert result == expected
result = add([0,0,1,1,1,0,1,0,0,1], [1,0,0,1,1,1,0,0,1,1])
expected = [1,1,0,1,0,1,1,1,0,0]
assert result == expected
def test_twos():
result = twos([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0])
expected = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0]
assert result == expected
result = twos([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,1,0,1,1,1,1,0,1,0])
expected = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,0,1,0,0,0,0,1,1,0]
assert result == expected
| 30.62
| 84
| 0.547355
|
6961e1d83e9a66f473c0cac181bc4b1b38859c11
| 1,865
|
py
|
Python
|
python/desc/twinkles/analyseICat.py
|
LSSTDESC/Twinkles
|
86d6caebde7f965b76ded80df0a1b66254f2a866
|
[
"MIT"
] | 6
|
2017-02-10T00:38:09.000Z
|
2018-12-13T17:12:08.000Z
|
python/desc/twinkles/analyseICat.py
|
LSSTDESC/Twinkles
|
86d6caebde7f965b76ded80df0a1b66254f2a866
|
[
"MIT"
] | 101
|
2016-11-29T15:31:00.000Z
|
2019-08-23T19:14:19.000Z
|
python/desc/twinkles/analyseICat.py
|
LSSTDESC/Twinkles
|
86d6caebde7f965b76ded80df0a1b66254f2a866
|
[
"MIT"
] | 4
|
2017-01-05T20:28:40.000Z
|
2018-12-13T17:12:09.000Z
|
from __future__ import absolute_import, division, print_function
import pandas as pd
import numpy as np
def readPhoSimInstanceCatalog(fname,
names=['obj', 'SourceID', 'RA', 'DEC', 'MAG_NORM',\
'SED_NAME', 'REDSHIFT', 'GAMMA1',\
'GAMMA2', 'MU', 'DELTA_RA', 'DELTA_DEC',\
'SOURCE_TYPE', 'DUST_REST_NAME',\
'Av', 'Rv', 'Dust_Lab_Name', 'EBV']):
"""
read the phoSimInstanceCatalog and return the contents
Parameters
----------
fname : mandatory, string
filename of the phosim instance catalog
names : a list of column names matching the number of columns
Returns
-------
A `pandas.DataFrame` with the phosim Instance Catalog with metadata
accessed as a dictionary through the meta attribute of the return.
"""
# read the header into a metadata list, and get number of lines to skip
# for catalog
metalines = []
with open(fname) as f:
linenum = 0
for line in f:
if line.startswith('object'):
continue
metalines.append(line)
linenum += 1
# process the headers into a metadata list
meta = metadataFromLines(metalines)
# read the catalog into a dataframe
df = pd.read_csv(fname, skiprows=linenum, names=names, sep='\s+')
df.meta = meta
return df
def metadataFromLines(lines):
"""
process the metadata lines into a dictionary
"""
info = [line.split() for line in lines]
    meta = {key: float(value) for key, value in info}  # np.float was removed in recent NumPy
return meta
if __name__ == "__main__":
meta, df = readPhoSimInstanceCatalog('/Users/rbiswas/src/LSST/sims_catUtils/examples/SNOnlyPhoSimCatalog.dat')
print(df.head())
| 30.080645
| 114
| 0.589812
|
08662e3756cccdb6402a64b83a1e68427c059d1f
| 32,469
|
py
|
Python
|
xpsi/PostProcessing/_spectrum.py
|
DevarshiChoudhury/xpsi
|
200b82b4ef4a4e7342fc30dd03c5821cff0031c2
|
[
"MIT"
] | 14
|
2019-09-26T12:08:06.000Z
|
2021-05-11T15:26:10.000Z
|
xpsi/PostProcessing/_spectrum.py
|
DevarshiChoudhury/xpsi
|
200b82b4ef4a4e7342fc30dd03c5821cff0031c2
|
[
"MIT"
] | 13
|
2020-01-10T11:03:28.000Z
|
2021-10-04T14:44:01.000Z
|
xpsi/PostProcessing/_spectrum.py
|
DevarshiChoudhury/xpsi
|
200b82b4ef4a4e7342fc30dd03c5821cff0031c2
|
[
"MIT"
] | 9
|
2020-03-04T13:28:05.000Z
|
2021-09-28T09:00:50.000Z
|
from __future__ import division
from ._global_imports import *
try:
import fgivenx
except ImportError:
_warning('Cannot import fgivenx for conditional posterior contours.')
fgivenx = None
from ..tools.phase_integrator import phase_integrator
from ..tools.energy_interpolator import energy_interpolator
from ..tools.phase_interpolator import phase_interpolator as interp
from ._signalplot import SignalPlot
class SpectrumPlot(SignalPlot):
""" Plot posterior-averaged channel count-rate spectra.
The figure contains three panels which share phase as an x-axis:
* the top panel displays the specific photon flux signal
from the source, resolved as a function of energy, optionally showing
both unattenuated and attenuated incident spectra and optionally
using :mod:`fgivenx`;
* the center panel displays the posterior expectation of the count-rate
signal as a function of channel and phase, optionally including an
expected background signal;
* the bottom panel displays the phase-integrated (averaged) count-rate
      spectrum as a function of channel number, optionally including an
expected background signal and optionally using :mod:`fgivenx`.
The top and bottom panels aim to render the conditional posterior
    distribution of the associated signal as a function of an energy (proxy)
variable, ideally with contours to map out the conditional posterior mass.
These panels have space to optionally display other elements such as:
the posterior-expected total signal; the posterior-expected component
signals; the true total and component signals if the ground truth (the
    injected signal corresponding to some model parameter vector) is known;
attenuated incident spectra; and the summation of posterior-expected
total (component-summed) source count-rate signals with posterior-expected
background count-rate signals.
The following example is (improved) from :ref:`R19`:
.. image:: _static/_spectrumplot.png
:param float rel_num_energies:
The number of energies desired for interpolation as a fraction of the
number of energies implemented for the original incident signal
integration. The energy set will be appropriately spaced and bounded.
:param int num_phases:
The number of phases to interpolate the pulse-profile signals at for
the center panel.
:param str registered_cmap:
Colormap name from :mod:`matplotlib` to use for the posterior-expected
registered signal as a function of channel and phase (center panel).
:param bool show_components:
If the :class:`~.Signal.Signal` instance has multiple components (hot
region signals), display the posterior expectations of those components
as a function of energy (top panel) and channel (bottom panel).
:param bool show_attenuated:
If the source signal is attenuated by the interstellar absorption
processes, display the posterior-expected attenuated incident specific
photon flux spectra? This switch also instructs :mod:`fgivenx`, if
enabled, to generate conditional posterior contours for the attenuated
spectrum instead of the unattenuated spectrum (top panel). If
:mod:`fgivenx` is not invoked, this switch instructs the plotting
of sample-by-sample attenuated total (component-summed) spectra to
delineate the distribution of conditional posterior mass (top panel).
:param dict expectation_line_kwargs:
Keyword arguments for plotting the posterior-expected signal lines (in
the top and bottom panels).
:param bool add_background:
        Add a posterior-expected background count-rate signal to the total
(component-summed) expected source count-rate signals in the center
and bottom panels?
:param dict background_line_kwargs:
Keyword arguments for plotting the posterior-expected spectrum in the
bottom panel that includes the background signal.
:param bool use_fgivenx:
Use :mod:`fgivenx` to plot conditional posterior contours in the
top and bottom panels?
:param dict incident_contour_kwargs:
Keyword arguments for :mod:`fgivenx` incident signal contours (top
panel) that will take precedence over the corresponding class
attributes. (See the :class:`~.SignalPlot` class if you choose
not to modify these attributes on this present subclass.)
:param dict registered_contour_kwargs:
Keyword arguments for :mod:`fgivenx` registered signal contours (bottom
panel) that will take precedence over the corresponding class
attributes. (See the :class:`~.SignalPlot` class if you choose
not to modify these attributes on this present subclass.)
:param plot_truth:
Plot the ground truth (injected) signal, if known and available, in
the top and bottom panels.
:param truth_line_kwargs:
Keyword arguments for plotting the ground truth signal lines (top and
bottom panels).
:param comp_truth_line_kwargs:
Keyword arguments for plotting the component ground truth signal lines
(top and bottom panels).
"""
__figtype__ = 'signalplot_spectrum'
# do not change at runtime (see base class comments):
__caching_targets__ = ['shifts',
'signals', # count-rate signals
# incident specific flux signals
'incident_specific_flux_signals']
__rows__ = 3
__columns__ = 1
__ax_rows__ = 2
__ax_columns__ = 1
__height_ratios__ = [1,2]
__width_ratios__ = [1]
__wspace__ = 0.025
__hspace__ = 0.175
@make_verbose('Instantiating a spectrum plotter for posterior checking',
'Spectrum plotter instantiated')
def __init__(self,
rel_num_energies=10.0,
num_phases=1000,
registered_cmap='inferno',
show_components=False,
show_attenuated=True,
expectation_line_kwargs=None,
comp_expectation_line_kwargs=None,
add_background=False,
background_line_kwargs=None,
sample_line_kwargs=None,
use_fgivenx=False,
incident_contour_kwargs=None,
registered_contour_kwargs=None,
plot_truth=False,
truth_line_kwargs=None,
comp_truth_line_kwargs=None,
**kwargs):
try:
_shadow = not self._logspace_y
except AttributeError:
_shadow = True
if _shadow: # shadow class attribute
kwargs.setdefault('logspace_y', True)
if not kwargs.get('logspace_y'):
yield ('Spectrum plots may have conditional-probability '
'contour artefacts if logspace_y is not True.')
super(SpectrumPlot, self).__init__(**kwargs)
self._rel_num_energies = rel_num_energies
self._phases = _np.linspace(0.0, 2.0, int(num_phases))
self._show_attenuated = show_attenuated
self._add_background = add_background
if add_background: # count-rate spectrum
self.__caching_targets__.append('background_signal')
if use_fgivenx and fgivenx is None:
raise ImportError('Install fgivenx to plot contours.')
self._use_fgivenx = use_fgivenx
if self._use_fgivenx:
self._incident_contour_kwargs =\
incident_contour_kwargs if incident_contour_kwargs else {}
self._registered_contour_kwargs =\
registered_contour_kwargs if registered_contour_kwargs else {}
self._get_figure()
fig = self._fig
gs = self._gs
cls = type(self)
# for the incident specific flux signal incident on an instrument
gs_top = gridspec.GridSpecFromSubplotSpec(1, 2,
subplot_spec=gs[0,0],
wspace=cls.__wspace__,
width_ratios=[50,1])
self._ax_incident = fig.add_subplot(gs_top[0,0])
# for the count-rate signal registered by an an instrument
gs_bottom = gridspec.GridSpecFromSubplotSpec(2, 2,
subplot_spec=gs[1,0],
wspace=cls.__wspace__,
hspace=0.125*self._fscale,
height_ratios=[1,1],
width_ratios=[50,1])
self._ax_registered = fig.add_subplot(gs_bottom[0,0])
self._ax_registered_1d = fig.add_subplot(gs_bottom[1,0])
self._axes = [self._ax_incident,
self._ax_registered,
self._ax_registered_1d]
# incident axis properties
self._ax_incident.set_xlabel(r'$E$ [keV]')
self._ax_incident.set_xscale('log')
self._ax_incident.set_ylabel(r'photons/keV/cm$^{2}$/s')
self._ax_incident.set_yscale('log')
# registered axis properties
for ax in self._axes[1:]:
ax.set_xscale('log')
self._ax_registered.tick_params(axis='x', which='both',
labelbottom=False)
self._ax_registered_1d.set_xlabel('channel')
self._ax_registered.set_ylabel(r'$\phi$ [cycles]')
self._ax_registered.yaxis.set_major_locator(MultipleLocator(0.5))
self._ax_registered.yaxis.set_minor_locator(MultipleLocator(0.1))
self._ax_registered.set_ylim([0.0,2.0])
self._ax_registered_1d.set_ylabel('counts/s')
self._ax_registered_1d.set_yscale('log')
# colorbars
if use_fgivenx:
self._ax_incident_cb = fig.add_subplot(gs_top[0,1])
self._axes.append(self._ax_incident_cb)
self._ax_registered_1d_cb = fig.add_subplot(gs_bottom[1,1])
self._axes.append(self._ax_registered_1d_cb)
self._ax_registered_cb = fig.add_subplot(gs_bottom[0,1])
self._axes.append(self._ax_registered_cb)
self._show_components = show_components
self._registered_cmap = registered_cmap
if sample_line_kwargs is not None:
self._sample_line_kwargs = sample_line_kwargs
else:
self._sample_line_kwargs = {}
self._plot_truth = plot_truth
if self._plot_truth:
if truth_line_kwargs is None:
self._truth_line_kwargs = \
dict(color=('b' if self._use_fgivenx else 'darkgreen'),
ls='-.',
lw=1.0,
alpha=1.0)
else:
self._truth_line_kwargs = truth_line_kwargs
if comp_truth_line_kwargs is not None:
self._comp_truth_line_kwargs = comp_truth_line_kwargs
else:
self._comp_truth_line_kwargs = self._truth_line_kwargs
if expectation_line_kwargs is None:
color = 'k' if self._use_fgivenx else 'r'
self._expectation_line_kwargs = dict(color=color,
ls='-',
lw=1.0,
alpha=1.0)
else:
self._expectation_line_kwargs = expectation_line_kwargs
if comp_expectation_line_kwargs is not None:
self._comp_expectation_line_kwargs = comp_expectation_line_kwargs
else:
self._comp_expectation_line_kwargs = self._expectation_line_kwargs
if background_line_kwargs is None:
self._background_line_kwargs = dict(color='orange',
ls='-',
lw=1.0,
alpha=1.0)
else:
self._background_line_kwargs = background_line_kwargs
plt.close()
yield
@property
def instruction_set(self):
return self._instruction_set
@instruction_set.deleter
def instruction_set(self):
try:
del self._instruction_set
except AttributeError:
pass
@make_verbose('SpectrumPlot object iterating over samples',
'SpectrumPlot object finished iterating')
def execute(self, thetas, wrapper):
self._num_samples = thetas.shape[0]
self._energies = self._signal.create_energy_array(self._rel_num_energies)
if self._use_fgivenx:
# determine which spectrum to compute contours for
if self._show_attenuated:
wrapped = wrapper(self, 'incident_sums')
self._instruction_set = 1
# calculate expected unattenuated incident
for i in range(self._num_samples):
_ = wrapped(None, thetas[i,:])
# rewrap to reset the cache iterator
wrapped = wrapper(self, 'attenuated_incident_sums')
self._instruction_set = 0
else:
wrapped = wrapper(self, 'incident_sums')
self._instruction_set = 1
self._add_incident_contours(wrapped, thetas)
self._instruction_set = 2
self._add_registered_contours(wrapper(self, 'registered_sums'),
thetas)
yield 'Added conditional posterior contours for incident spectrum.'
if self._add_background:
self._instruction_set = 3
wrapped = wrapper(self, 'background_sum')
for i in range(self._num_samples):
wrapped(None, thetas[i,:])
else:
del self.instruction_set
wrapped = wrapper(self, ['attenuated_incident_sums',
'incident_sums',
'registered_sums'])
for i in range(self._num_samples):
wrapped(None, thetas[i,:])
yield
def next(self):
""" Update posterior expected signals given the updated signal.
Plots signals if :mod:`fgivenx` is not used, otherwise returns
callback information for :mod:`fgivenx`.
.. note::
You cannot make an iterator from an instance of this class.
"""
try:
self._instruction_set
except AttributeError:
if self._show_attenuated:
signal = self._handle_attenuated_incident()
self._add_signal(self._ax_incident,
self._energies,
signal,
**self._sample_line_kwargs)
signal = self._handle_incident()
if not self._show_attenuated:
self._add_signal(self._ax_incident,
self._energies,
signal,
**self._sample_line_kwargs)
signal = self._handle_registered()
self._add_registered_spectrum(self._ax_registered_1d,
signal,
**self._sample_line_kwargs)
if self._add_background:
self._handle_background() # nothing to plot here
else:
if self._instruction_set == 0:
return self._handle_attenuated_incident() # end execution here
if self._instruction_set == 1:
return self._handle_incident()
if self._instruction_set == 2:
return self._handle_registered()
if self._instruction_set == 3:
self._handle_background() # nothing to return
return None # reached if not invoking fgivenx
def _handle_attenuated_incident(self):
""" Instructions for handling the attenuated incident spectrum. """
ref = self._signal
try:
self._attenuated_incident_sums
except AttributeError:
self._attenuated_incident_sums = [None]*len(ref.incident_specific_flux_signals)
attenuated_incident = None
for i, component in enumerate(ref.incident_specific_flux_signals):
temp = phase_integrator(1.0,
_np.array([0.0,1.0]),
component,
ref.phases[i],
0.0)
temp = energy_interpolator(1, # threads
temp,
_np.log10(ref.energies),
_np.log10(self._energies)).reshape(-1)
# requires a user implementation or will raise NotImplementedError
ref.interstellar(self._energies, temp)
try:
attenuated_incident += temp
except TypeError:
attenuated_incident = temp
try:
self._attenuated_incident_sums[i] += temp
except TypeError:
self._attenuated_incident_sums[i] = temp.copy()
return attenuated_incident
@property
def attenuated_incident_sums(self):
return self._attenuated_incident_sums
@attenuated_incident_sums.deleter
def attenuated_incident_sums(self):
try:
del self._attenuated_incident_sums
except AttributeError:
pass
@property
def expected_attenuated_incident(self):
""" Get the expectations of the incident (component) spectra. """
return [component/self._num_samples for component \
in self._attenuated_incident_sums]
def _handle_incident(self):
""" Instructions for handling the unattenuated incident spectrum. """
ref = self._signal
try:
self._incident_sums
except AttributeError:
self._incident_sums = [None] * len(ref.incident_specific_flux_signals)
incident = None
for i, component in enumerate(ref.incident_specific_flux_signals):
temp = phase_integrator(1.0,
_np.array([0.0,1.0]),
component,
ref.phases[i],
0.0)
temp = energy_interpolator(1, # threads
temp,
_np.log10(ref.energies),
_np.log10(self._energies)).reshape(-1)
try:
incident += temp
except TypeError:
incident = temp
try:
self._incident_sums[i] += temp
except TypeError:
self._incident_sums[i] = temp.copy()
return incident
@property
def incident_sums(self):
return self._incident_sums
@incident_sums.deleter
def incident_sums(self):
try:
del self._incident_sums
except AttributeError:
pass
@property
def expected_incident(self):
""" Get the expectations of the incident (component) spectra. """
return [component/self._num_samples for component in self._incident_sums]
def _handle_registered(self):
""" Instructions for handling the registered spectrum. """
ref = self._signal
try:
self._registered_sums
except AttributeError:
self._registered_sums = [None] * len(ref.signals)
registered = None
for i, component in enumerate(ref.signals):
temp = phase_integrator(1.0,
_np.array([0.0,1.0]),
component,
ref.phases[i],
0.0).reshape(-1)
try:
registered += temp
except TypeError:
registered = temp
try:
self._registered_sums[i] += component
except TypeError:
self._registered_sums[i] = component
return registered
@property
def registered_sums(self):
return self._registered_sums
@registered_sums.deleter
def registered_sums(self):
try:
del self._registered_sums
except AttributeError:
pass
@property
def expected_registered(self):
""" Get the expectations of the registered count-rate spectra. """
return [component/self._num_samples for component in self._registered_sums]
def _handle_background(self):
""" Instructions for handling the background spectrum. """
ref = self._signal
try:
self._background_sum
except AttributeError:
self._background_sum = None
background = ref.background_signal
try:
self._background_sum += background
except TypeError:
self._background_sum = background
return None
@property
def background_sum(self):
return self._background_sum
@background_sum.deleter
def background_sum(self):
try:
del self._background_sum
except AttributeError:
pass
@property
def expected_background(self):
""" Get the expectation of the background count-rate spectrum. """
mean = self._background_sum/self._num_samples
# transform to count rate signal, assuming background signal
# is in units of counts; subclass and overwrite if you want to change
return mean/self._signal.data.exposure_time
@make_verbose('SpectrumPlot object finalizing',
'SpectrumPlot object finalized')
def finalize(self):
""" Execute final instructions. """
ref = self._signal
self._plot_components = self._show_components and ref.num_components > 1
# add the incident signals
if self._plot_truth:
self._add_true_incident_signals()
self._add_expected_incident_signals()
# add the registered signals
if self._plot_truth:
self._add_true_registered_signals()
self._add_expected_registered_signals()
def _add_true_incident_signals(self):
""" Render ground truth incident (component) signals. """
ref = self._signal
total = None
for component, phases in zip(ref.incident_specific_flux_signals,
ref.phases):
temp = phase_integrator(1.0,
_np.array([0.0,1.0]),
component,
phases,
0.0)
temp = energy_interpolator(1, # threads
temp,
_np.log10(ref.energies),
_np.log10(self._energies)).reshape(-1)
if self._show_attenuated:
                ref.interstellar(self._energies, temp)  # use the same interpolation grid as above
try:
total += temp
except TypeError:
total = temp
if self._plot_components:
self._add_signal(self._ax_incident,
self._energies,
temp,
**self._comp_truth_line_kwargs)
self._add_signal(self._ax_incident,
self._energies,
total,
**self._truth_line_kwargs)
def _add_expected_incident_signals(self):
""" Render posterior-expected incident (component) signals. """
ax = self._ax_incident
view_y_bottom = ax.yaxis.get_view_interval()[0]
ref = self._signal
total = None
for component in self.expected_incident:
try:
total += component
except TypeError:
total = component
if self._plot_components:
self._add_signal(ax,
self._energies,
component,
**self._comp_expectation_line_kwargs)
self._add_signal(ax,
self._energies,
total,
**self._expectation_line_kwargs)
if self._show_attenuated:
total = None
for component in self.expected_attenuated_incident:
try:
total += component
except TypeError:
total = component
if self._plot_components:
self._add_signal(ax,
self._energies,
component,
**self._comp_expectation_line_kwargs)
self._add_signal(ax,
self._energies,
total,
**self._expectation_line_kwargs)
ax.set_ylim(bottom = view_y_bottom)
ax.set_xlim([self._signal.energy_edges[0],
self._signal.energy_edges[-1]])
locmaj = LogLocator(base=10.0, numticks=100)
ax.yaxis.set_major_locator(locmaj)
locmin = LogLocator(base=10.0, subs=_np.arange(2,10)*0.1, numticks=100)
ax.yaxis.set_minor_locator(locmin)
ax.yaxis.set_minor_formatter(NullFormatter())
def _add_true_registered_signals(self):
""" Render ground truth registered (component) signals. """
        ref = self._signal
total = None
for component, phases in zip(ref.signals, ref.phases):
temp = phase_integrator(1.0,
_np.array([0.0,1.0]),
component,
phases,
0.0).reshape(-1)
try:
total += temp
except TypeError:
                total = temp
if self._plot_components:
self._add_registered_spectrum(self._ax_registered_1d,
temp,
**self._comp_truth_line_kwargs)
self._add_registered_spectrum(self._ax_registered_1d,
total,
**self._truth_line_kwargs)
if self._add_background: # another line including the background
total += ref.background_signal
self._add_registered_spectrum(self._ax_registered_1d,
total,
**self._background_line_kwargs)
def _add_expected_registered_signals(self):
""" Render posterior-expected registered (component) signals. """
ref = self._signal
total = None
for component, phases in zip(self.expected_registered, ref.phases):
temp = phase_integrator(1.0,
_np.array([0.0,1.0]),
component,
phases,
0.0).reshape(-1)
try:
total += temp
except TypeError:
total = temp
if self._plot_components:
self._add_registered_spectrum(self._ax_registered_1d,
temp,
**self._comp_expectation_line_kwargs)
# 1D
self._add_registered_spectrum(self._ax_registered_1d,
total,
**self._expectation_line_kwargs)
if self._add_background: # another line including the background
total += self.expected_background
self._add_registered_spectrum(self._ax_registered_1d,
total,
**self._background_line_kwargs)
ax = self._ax_registered_1d
ax.set_xlim([ref.data.channels[0],
ref.data.channels[-1]])
locmaj = LogLocator(base=10.0, numticks=100)
ax.yaxis.set_major_locator(locmaj)
locmin = LogLocator(base=10.0, subs=_np.arange(2,10)*0.1, numticks=100)
ax.yaxis.set_minor_locator(locmin)
ax.yaxis.set_minor_formatter(NullFormatter())
# 2D
total = None
for component, shift, phases in zip(self.expected_registered,
ref.shifts, ref.phases):
temp = interp(self._phases,
phases,
component,
shift)
try:
total += temp
except TypeError:
total = temp
        if self._add_background: # add expected background
for i in range(total.shape[1]):
total[:,i] += self.expected_background
registered = self._ax_registered.pcolormesh(ref.data.channels,
self._phases,
total.T, # channel number as x-axis
cmap = cm.get_cmap(self._registered_cmap),
linewidth = 0,
rasterized = self._rasterized)
registered.set_edgecolor('face')
self._ax_registered.set_xlim([ref.data.channels[0],
ref.data.channels[-1]])
self._registered_cb = plt.colorbar(registered,
cax=self._ax_registered_cb,
ticks=_get_default_locator(None),
format=_get_default_formatter())
self._registered_cb.ax.set_frame_on(True)
self._registered_cb.ax.yaxis.set_minor_locator(AutoMinorLocator())
self._registered_cb.set_label(label=r'counts/s', labelpad=15)
def _add_registered_spectrum(self, ax, spectrum, **kwargs):
""" Add registered spectrum line as a function of channel number. """
if not kwargs:
kwargs.update(dict(color='k', linestyle='-', lw=0.05, alpha=1.0))
elif 'ls' in kwargs:
kwargs['linestyle'] = kwargs.pop('ls')
ax.step(self._signal.data.channels,
spectrum,
where='mid',
**kwargs)
@make_verbose('Adding credible intervals on the incident specific photon '
'flux spectrum',
'Credible intervals added')
def _add_incident_contours(self, callback, thetas):
""" Add contours to 1D incident specific flux spectrum axes objects. """
self._add_contours(callback, thetas, self._energies,
self._ax_incident,
self._ax_incident_cb,
**self._incident_contour_kwargs)
label = r'$\pi(\mathrm{photons/keV/cm}^{2}\mathrm{/s};E)$'
self._ax_incident_cb.set_ylabel(label)
@make_verbose('Adding credible intervals on the count-rate spectrum',
'Credible intervals added')
def _add_registered_contours(self, callback, thetas):
""" Add contours to 1D count-rate spectrum axes objects. """
self._add_contours(callback, thetas, self._signal.data.channels,
self._ax_registered_1d,
self._ax_registered_1d_cb,
**self._registered_contour_kwargs)
self._ax_registered_1d_cb.set_ylabel(r'$\pi(\mathrm{counts/s};\mathrm{channel})$')
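# Illustrative construction sketch (added for clarity; the post-processing
# driver that supplies the Signal instance, the posterior samples and the
# execute()/next()/finalize() calls is assumed and not shown here -- the
# argument values below are arbitrary examples, not recommendations):
#
#   plot = SpectrumPlot(num_phases=512,
#                       show_attenuated=True,
#                       add_background=True,
#                       use_fgivenx=True,
#                       registered_cmap='inferno')
#
# The plotter object is then handed to the signal post-processing machinery,
# which drives the execute() -> next() -> finalize() sequence seen above.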
| 38.470379
| 91
| 0.560134
|
2fb843391c823c0c9cf63109e755bf17c567e242
| 3,222
|
py
|
Python
|
pacK/modul2.py
|
Nika1411/Laba17
|
408fd297495e199d8489617ebdf6fe10d7c04fc2
|
[
"MIT"
] | null | null | null |
pacK/modul2.py
|
Nika1411/Laba17
|
408fd297495e199d8489617ebdf6fe10d7c04fc2
|
[
"MIT"
] | null | null | null |
pacK/modul2.py
|
Nika1411/Laba17
|
408fd297495e199d8489617ebdf6fe10d7c04fc2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Variant 14
# Create a Payment (salary) class. The class must contain the fields: full name,
# salary, year of hiring, bonus percentage, income tax, number of days worked in
# the month, number of working days in the month, and the accrued and withheld
# amounts. Implement methods that compute the accrued amount, the withheld amount,
# the take-home amount, and the length of service. The length of service is the
# whole number of years elapsed from the year of hiring to the current year. The
# accrued amount is the sum earned for the days worked plus the bonus, i.e. a
# fraction of that sum. Withholdings consist of the pension fund contribution
# (1% of the accrued amount) and income tax. Income tax is 13% of the accrued
# amount net of the pension fund contribution.
class Payment:
def __init__(self, full_name=' ', salary=0, year=0, percent=0, daysworked=0, workingdays=1):
self.__full_name = str(full_name)
self.__salary = int(salary)
self.__year = int(year)
self.__percent = float(percent)
self.__days_worked = int(daysworked)
self.__working_days = int(workingdays)
# self.amount = 0
# self.held_amount = 0
# self.hand_amount = 0
# self.exp = 0
# self.accrued_amount()
# self.withheld_amount()
# self.handed_amount()
# self.experience()
def accrued_amount(self):
a = self.__salary / self.__working_days
b = a * self.__days_worked
percent = self.__percent / 100 + 1
return b * percent
def withheld_amount(self):
b = (self.__salary / self.__working_days) * self.__days_worked
return b * (0.13 + 0.01)
    def handed_amount(self):
        # take-home pay is the accrued amount minus the withheld amount
        return self.accrued_amount() - self.withheld_amount()
def experience(self):
return 2020 - self.__year
# def __str__(self):
# return f"Experience: {self.exp} years \nCalculations: {self.amount} - {self.held_amount} = {self.hand_amount}"
def __lt__(self, other):
return self.__salary < other.__salary
def __eq__(self, other):
return self.__salary == other.__salary
def __ne__(self, other):
return self.__salary != other.__salary
def __gt__(self, other):
return self.__salary > other.__salary
def __ge__(self, other):
return self.__salary >= other.__salary
def __le__(self, other):
return self.__salary <= other.__salary
def __truediv__(self, other):
if self.__salary >= other.__salary:
return self.__salary / other.__salary
else:
return other.__salary / self.__salary
def __sub__(self, other):
if self.__days_worked >= other.__days_worked:
return self.__days_worked - other.__days_worked
else:
return other.__days_worked - self.__days_worked
def __add__(self, other):
return self.__working_days + other.__working_days
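# Illustrative usage (added for clarity; the sample figures are invented):
if __name__ == '__main__':
    worker = Payment('Ivanov Ivan Ivanovich', salary=50000, year=2015,
                     percent=20, daysworked=20, workingdays=22)
    print('Experience, years:', worker.experience())
    print('Accrued:', round(worker.accrued_amount(), 2))
    print('Withheld:', round(worker.withheld_amount(), 2))
    print('Take-home:', round(worker.handed_amount(), 2))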
| 35.8
| 120
| 0.667908
|
5bbeead3df22125db183e582d4b92a991181da8b
| 35
|
py
|
Python
|
vkbottle/dispatch/return_manager/__init__.py
|
homus32/vkbottle
|
8247665ef74835abe0c2c5e5981826540d0ecdb5
|
[
"MIT"
] | 698
|
2019-08-09T17:32:52.000Z
|
2021-07-22T08:30:32.000Z
|
vkbottle/dispatch/return_manager/__init__.py
|
homus32/vkbottle
|
8247665ef74835abe0c2c5e5981826540d0ecdb5
|
[
"MIT"
] | 216
|
2019-08-18T19:22:50.000Z
|
2021-07-30T12:15:17.000Z
|
vkbottle/dispatch/return_manager/__init__.py
|
homus32/vkbottle
|
8247665ef74835abe0c2c5e5981826540d0ecdb5
|
[
"MIT"
] | 268
|
2019-08-10T14:52:04.000Z
|
2021-07-28T07:06:42.000Z
|
from .abc import BaseReturnManager
| 17.5
| 34
| 0.857143
|
d010d090643176a7143a847e73080106627a44af
| 1,720
|
py
|
Python
|
setup.py
|
parrondo/test
|
f2c88ad12e3715f002aecf54a06dc518aa9171e0
|
[
"MIT"
] | null | null | null |
setup.py
|
parrondo/test
|
f2c88ad12e3715f002aecf54a06dc518aa9171e0
|
[
"MIT"
] | null | null | null |
setup.py
|
parrondo/test
|
f2c88ad12e3715f002aecf54a06dc518aa9171e0
|
[
"MIT"
] | null | null | null |
import os
import re
from setuptools import setup, find_packages
regexp = re.compile(r'.*__version__ = [\'\"](.*?)[\'\"]', re.S)
base_package = 'test'
base_path = os.path.dirname(__file__)
init_file = os.path.join(base_path, 'src', 'test', '__init__.py')
with open(init_file, 'r') as f:
module_content = f.read()
match = regexp.match(module_content)
if match:
version = match.group(1)
else:
raise RuntimeError(
'Cannot find __version__ in {}'.format(init_file))
with open('README.rst', 'r') as f:
readme = f.read()
with open('CHANGELOG.rst', 'r') as f:
changes = f.read()
def parse_requirements(filename):
''' Load requirements from a pip requirements file '''
with open(filename, 'r') as fd:
lines = []
for line in fd:
            line = line.strip()  # str.strip() returns a new string, so rebind it
if line and not line.startswith("#"):
lines.append(line)
return lines
requirements = parse_requirements('requirements.txt')
if __name__ == '__main__':
setup(
name='test',
description='A description of the package',
long_description='\n\n'.join([readme, changes]),
license='MIT license',
url='https://github.com/GithubUserName/test',
version=version,
author='Your Name',
author_email='',
maintainer='Your Name',
maintainer_email='',
install_requires=requirements,
keywords=['test'],
package_dir={'': 'src'},
packages=find_packages('src'),
zip_safe=False,
classifiers=['Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.6']
)
| 28.196721
| 65
| 0.589535
|
0ef7598dfc450c658627b0f48e6d6050d763d93e
| 2,719
|
py
|
Python
|
ch05_CNN/00_lenet.py
|
insaneyilin/d2l_pytorch_notes
|
aa607b3ff0f5e47659dc4cdbdb4d13bff630a4e4
|
[
"Apache-2.0"
] | null | null | null |
ch05_CNN/00_lenet.py
|
insaneyilin/d2l_pytorch_notes
|
aa607b3ff0f5e47659dc4cdbdb4d13bff630a4e4
|
[
"Apache-2.0"
] | null | null | null |
ch05_CNN/00_lenet.py
|
insaneyilin/d2l_pytorch_notes
|
aa607b3ff0f5e47659dc4cdbdb4d13bff630a4e4
|
[
"Apache-2.0"
] | null | null | null |
import os
import time
import torch
from torch import nn, optim
import sys
sys.path.append("..") # to find d2lzh_pytorch
import d2lzh_pytorch as d2l
# check device
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(torch.__version__)
print(device)
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(1, 6, 5), # in_channels, out_channels, kernel_size
nn.Sigmoid(),
nn.MaxPool2d(2, 2), # kernel_size, stride
nn.Conv2d(6, 16, 5),
nn.Sigmoid(),
nn.MaxPool2d(2, 2)
)
self.fc = nn.Sequential(
nn.Linear(16 * 4 * 4, 120),
nn.Sigmoid(),
nn.Linear(120, 84),
nn.Sigmoid(),
nn.Linear(84, 10)
)
def forward(self, img):
feature = self.conv(img)
output = self.fc(feature.view(img.shape[0], -1))
return output
lenet = LeNet()
print(lenet)
# load data
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size=batch_size)
# training function
def train(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs):
net = net.to(device)
print("training on ", device)
loss = torch.nn.CrossEntropyLoss()
batch_count = 0
for epoch in range(num_epochs):
train_loss_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
# iterate during each epoch
for X, y in train_iter:
X = X.to(device)
y = y.to(device)
y_hat = net(X)
l = loss(y_hat, y) # calc loss
optimizer.zero_grad() # clear grad. info
l.backward() # backward propagation
optimizer.step() # optimization
train_loss_sum += l.cpu().item() # accumulate training loss
# accumulate training accurate predicted samples
train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
n += y.shape[0] # accumulate total number of samples during training process
batch_count += 1 # number of batch counts
# evaluate after one epoch's training is done
test_acc = d2l.evaluate_accuracy(test_iter, net)
print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'
% (epoch + 1, train_loss_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))
# learning rate, number of epochs
lr, num_epochs = 0.001, 5
optimizer = torch.optim.Adam(lenet.parameters(), lr=lr)
train(lenet, train_iter, test_iter, batch_size, optimizer, device, num_epochs)
| 33.158537
| 108
| 0.604266
|
74c87c03b633954b6407919ec06a67ec3ace61e2
| 450
|
py
|
Python
|
src/data-structures/heaps/test_minHeap.py
|
Sunhick/csci-2270
|
2a65cf088010b2d508833700da05ed607a84d24c
|
[
"MIT"
] | 1
|
2017-05-04T05:01:13.000Z
|
2017-05-04T05:01:13.000Z
|
src/data-structures/heaps/test_minHeap.py
|
Sunhick/csci-2270
|
2a65cf088010b2d508833700da05ed607a84d24c
|
[
"MIT"
] | null | null | null |
src/data-structures/heaps/test_minHeap.py
|
Sunhick/csci-2270
|
2a65cf088010b2d508833700da05ed607a84d24c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
minHeaps
Pythonic implementation of Heaps
"""
__author__ = "Sunil"
__email__ = "suba5417@colorado.edu"
import unittest
from minHeap import MinHeap
class test_MinHeap(unittest.TestCase):
"""
Test heap create, delete, insert
"""
heap = None
    def setUp(self):
        # unittest constructs TestCase(methodName); rather than overriding
        # __init__, build a fresh heap before every test here
        self.heap = MinHeap()
def tearDown(self):
pass
def testCase(self):
pass
| 13.636364
| 38
| 0.626667
|
493f544865cd9bfe74771a0df347ed3eb1bcf2d2
| 932
|
py
|
Python
|
text/detector/utils/python_nms.py
|
kernelforce/invoice
|
58c4e24484b3e47ed8aaf227964b372eac475227
|
[
"MIT"
] | 1,017
|
2019-08-02T04:18:35.000Z
|
2022-03-29T08:18:03.000Z
|
text/detector/utils/python_nms.py
|
guanshuicheng/-
|
21ea131ed8680dbb1740c45d690d6ed0e3eb4798
|
[
"MIT"
] | 47
|
2019-08-08T08:36:48.000Z
|
2022-03-08T07:00:29.000Z
|
text/detector/utils/python_nms.py
|
guanshuicheng/-
|
21ea131ed8680dbb1740c45d690d6ed0e3eb4798
|
[
"MIT"
] | 300
|
2019-08-03T03:06:30.000Z
|
2022-03-31T02:20:11.000Z
|
import numpy as np
def nms(boxes, threshold, method='Union'):
if boxes.size==0:
return np.empty((0,3))
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
s = boxes[:,4]
area = (x2-x1+1) * (y2-y1+1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size>0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, xx2-xx1+1)
h = np.maximum(0.0, yy2-yy1+1)
inter = w * h
        if method == 'Min':  # compare strings by value, not identity
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o<=threshold)]
pick = pick[0:counter]
return pick
| 28.242424
| 54
| 0.483906
|
c8f6a3771f8427ef7281eb590e15c5795998de56
| 514
|
py
|
Python
|
chapter5/rgb_hsv_grayscale.py
|
vizcacha/practicalcv
|
6bd8170bbb7325585811938fd35069f5e1b5605e
|
[
"MIT"
] | null | null | null |
chapter5/rgb_hsv_grayscale.py
|
vizcacha/practicalcv
|
6bd8170bbb7325585811938fd35069f5e1b5605e
|
[
"MIT"
] | null | null | null |
chapter5/rgb_hsv_grayscale.py
|
vizcacha/practicalcv
|
6bd8170bbb7325585811938fd35069f5e1b5605e
|
[
"MIT"
] | null | null | null |
from SimpleCV import Image, Display
import time
displayObject = Display()
img = Image('starry_night.png')
print 'Initial: %s' % (img.getPixel(25, 25),)
img.save(displayObject)
time.sleep(3)
hsv = img.toHSV()
print 'HSV: %s' % (hsv.getPixel(25, 25),)
hsv.save(displayObject)
time.sleep(3)
rgb = hsv.toRGB()
print 'RGB: %s' % (rgb.getPixel(25, 25),)
rgb.save(displayObject)
time.sleep(3)
gray = img.grayscale()
print 'Grayscale: %s' % (gray.getPixel(25, 25),)
gray.save(displayObject)
time.sleep(3)
| 13.891892
| 48
| 0.678988
|
de4ec424d0e27b4cdaef58efb807d12a1530b565
| 2,033
|
py
|
Python
|
venv/Lib/site-packages/pyrogram/raw/types/input_report_reason_geo_irrelevant.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/raw/types/input_report_reason_geo_irrelevant.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/raw/types/input_report_reason_geo_irrelevant.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class InputReportReasonGeoIrrelevant(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.ReportReason`.
Details:
- Layer: ``117``
- ID: ``0xdbd4feed``
**No parameters required.**
"""
__slots__: List[str] = []
ID = 0xdbd4feed
QUALNAME = "types.InputReportReasonGeoIrrelevant"
def __init__(self) -> None:
pass
@staticmethod
def read(data: BytesIO, *args: Any) -> "InputReportReasonGeoIrrelevant":
# No flags
return InputReportReasonGeoIrrelevant()
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
return data.getvalue()
| 31.765625
| 103
| 0.640433
|
a8511c4934ca02239845af28e1db32dcb2c9f2e0
| 3,388
|
py
|
Python
|
scripts/gha/install_prereqs_desktop.py
|
oliwilkinsonio/firebase-cpp-sdk
|
1a2790030e92f77ad2aaa87000a1222d12dcabfc
|
[
"Apache-2.0"
] | 193
|
2019-03-18T16:30:43.000Z
|
2022-03-30T17:39:32.000Z
|
scripts/gha/install_prereqs_desktop.py
|
oliwilkinsonio/firebase-cpp-sdk
|
1a2790030e92f77ad2aaa87000a1222d12dcabfc
|
[
"Apache-2.0"
] | 647
|
2019-03-18T20:50:41.000Z
|
2022-03-31T18:32:33.000Z
|
scripts/gha/install_prereqs_desktop.py
|
oliwilkinsonio/firebase-cpp-sdk
|
1a2790030e92f77ad2aaa87000a1222d12dcabfc
|
[
"Apache-2.0"
] | 86
|
2019-04-21T09:40:38.000Z
|
2022-03-26T20:48:37.000Z
|
#!/usr/bin/env python
# Copyright 2020 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script needs to be run once to prepare the machine for building SDK.
It will download required python dependencies and also install ccache on mac/linux.
ccache considerably improves the build times.
Please note that this script is aganostic of various desktop configurations.
For example, you can run it once regardless if you are following up with a build of x86 or x64.
Run this script from the root of the repository
Usage:
python scripts/gha/install_prereqs_desktop.py
"""
import utils
def main():
# Install protobuf on linux/mac if its not installed already
if not utils.is_command_installed('protoc'):
if utils.is_linux_os():
# sudo apt install protobuf-compiler
utils.run_command(['apt', 'install', '-y','protobuf-compiler'], as_root=True)
elif utils.is_mac_os():
# brew install protobuf
utils.run_command(['brew', 'install', 'protobuf'])
# Install go on linux/mac if its not installed already
if not utils.is_command_installed('go'):
if utils.is_linux_os():
# sudo apt install -y golang
utils.run_command(['apt', 'install', '-y','golang'], as_root=True)
elif utils.is_mac_os():
      # brew install go
utils.run_command(['brew', 'install', 'go'])
# Install openssl on linux/mac if its not installed already
  if not utils.is_command_installed('openssl'):
if utils.is_linux_os():
# sudo apt install -y openssl
utils.run_command(['apt', 'install', '-y','openssl'], as_root=True)
elif utils.is_mac_os():
      # brew install openssl
utils.run_command(['brew', 'install', 'openssl'])
# Install ccache on linux/mac if its not installed already
if not utils.is_command_installed('ccache'):
if utils.is_linux_os():
# sudo apt install ccache
utils.run_command(['apt', 'install', '-y', 'ccache'], as_root=True)
elif utils.is_mac_os():
# brew install ccache
utils.run_command(['brew', 'install', 'ccache'])
# Install clang-format on linux/mac if its not installed already
if not utils.is_command_installed('clang-format'):
if utils.is_linux_os():
# sudo apt install clang-format
utils.run_command(['apt', 'install', '-y','clang-format'], as_root=True)
elif utils.is_mac_os():
      # brew install clang-format
utils.run_command(['brew', 'install', 'clang-format'])
# Install required python dependencies.
# On Catalina, python2 in installed as default python.
# Example command:
# python3 -m pip install -r external/pip_requirements.txt --user
utils.run_command(
['python3' if utils.is_command_installed('python3') else 'python', '-m',
'pip', 'install', '-r', 'external/pip_requirements.txt', '--user'] )
if __name__ == '__main__':
main()
| 37.230769
| 95
| 0.691854
|
683c226df95cfdc762243d09f84ed53114ccc011
| 946
|
py
|
Python
|
myutils.py
|
hiszm/credit_card_scan
|
c2ebde77b5e878c57e514c5c2fed72444271ad6c
|
[
"Xnet",
"X11"
] | null | null | null |
myutils.py
|
hiszm/credit_card_scan
|
c2ebde77b5e878c57e514c5c2fed72444271ad6c
|
[
"Xnet",
"X11"
] | null | null | null |
myutils.py
|
hiszm/credit_card_scan
|
c2ebde77b5e878c57e514c5c2fed72444271ad6c
|
[
"Xnet",
"X11"
] | null | null | null |
import cv2
def sort_contours(cnts, method="left-to-right"):
reverse = False
i = 0
if method == "right-to-left" or method == "bottom-to-top":
reverse = True
if method == "top-to-bottom" or method == "bottom-to-top":
i = 1
    # Wrap each detected contour in its minimal bounding rectangle (x, y, w, h)
    # Sort the contours and their bounding boxes together along the chosen coordinate
boundingBoxes = [cv2.boundingRect(c) for c in cnts]
(cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
key=lambda b: b[1][i], reverse=reverse))
return cnts, boundingBoxes
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
dim = None
(h, w) = image.shape[:2]
if width is None and height is None:
return image
if width is None:
r = height / float(h)
dim = (int(w * r), height)
else:
r = width / float(w)
dim = (width, int(h * r))
resized = cv2.resize(image, dim, interpolation=inter)
return resized
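# Illustrative usage (added for clarity; 'card.png' is a placeholder path and
# the thresholding choices are only an example):
if __name__ == '__main__':
    image = cv2.imread('card.png')
    image = resize(image, width=300)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    found = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # OpenCV 3.x returns (image, contours, hierarchy); 4.x returns (contours, hierarchy)
    cnts = found[0] if len(found) == 2 else found[1]
    cnts, boxes = sort_contours(cnts, method="left-to-right")
    print([cv2.boundingRect(c) for c in cnts])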
| 30.516129
| 80
| 0.585624
|
a85d162629ba65d91d742e16a7a29b7a6544640d
| 867
|
py
|
Python
|
pyone/server.py
|
andromedia/addon-pyone
|
cf72fc27ac2267169c6c9ecf48ca3739249a51fe
|
[
"Apache-2.0"
] | null | null | null |
pyone/server.py
|
andromedia/addon-pyone
|
cf72fc27ac2267169c6c9ecf48ca3739249a51fe
|
[
"Apache-2.0"
] | null | null | null |
pyone/server.py
|
andromedia/addon-pyone
|
cf72fc27ac2267169c6c9ecf48ca3739249a51fe
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 www.privaz.io Valletech AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import environ
# check if test stub featuers are enabled
_test_fixture = (environ.get("PYONE_TEST_FIXTURE", "False").lower() in ["1", "yes", "true"])
if _test_fixture is False:
from . import OneServer
else:
from pyone.tester import OneServerTester as OneServer
| 34.68
| 92
| 0.756632
|
3573f06c6b6a0d836950f5f5b719a700e5b4f319
| 5,890
|
py
|
Python
|
Contents/Libraries/Shared/js2py/legecy_translators/translator.py
|
fossabot/Sub-Zero.bundle
|
fb2210f2fd9003b68f78f4c878f4428ec4aa0d4f
|
[
"MIT"
] | 1,553
|
2015-11-09T02:17:06.000Z
|
2022-03-31T20:24:52.000Z
|
Contents/Libraries/Shared/js2py/legecy_translators/translator.py
|
saiterlz/Sub-Zero.bundle
|
1a0bb9c3e4be84be35d46672907783363fe5a87b
|
[
"MIT"
] | 691
|
2015-11-05T21:32:26.000Z
|
2022-03-17T10:52:45.000Z
|
Contents/Libraries/Shared/js2py/legecy_translators/translator.py
|
saiterlz/Sub-Zero.bundle
|
1a0bb9c3e4be84be35d46672907783363fe5a87b
|
[
"MIT"
] | 162
|
2015-11-06T19:38:55.000Z
|
2022-03-16T02:42:41.000Z
|
from flow import translate_flow
from constants import remove_constants, recover_constants
from objects import remove_objects, remove_arrays, translate_object, translate_array, set_func_translator
from functions import remove_functions, reset_inline_count
from jsparser import inject_before_lval, indent, dbg
TOP_GLOBAL = '''from js2py.pyjs import *\nvar = Scope( JS_BUILTINS )\nset_global_object(var)\n'''
def translate_js(js, top=TOP_GLOBAL):
"""js has to be a javascript source code.
returns equivalent python code."""
# Remove constant literals
no_const, constants = remove_constants(js)
#print 'const count', len(constants)
# Remove object literals
no_obj, objects, obj_count = remove_objects(no_const)
#print 'obj count', len(objects)
# Remove arrays
no_arr, arrays, arr_count = remove_arrays(no_obj)
#print 'arr count', len(arrays)
# Here remove and replace functions
reset_inline_count()
no_func, hoisted, inline = remove_functions(no_arr)
#translate flow and expressions
py_seed, to_register = translate_flow(no_func)
# register variables and hoisted functions
#top += '# register variables\n'
top += 'var.registers(%s)\n' % str(to_register + hoisted.keys())
#Recover functions
# hoisted functions recovery
defs = ''
#defs += '# define hoisted functions\n'
#print len(hoisted) , 'HH'*40
for nested_name, nested_info in hoisted.iteritems():
nested_block, nested_args = nested_info
new_code = translate_func('PyJsLvalTempHoisted', nested_block,
nested_args)
new_code += 'PyJsLvalTempHoisted.func_name = %s\n' % repr(nested_name)
defs += new_code + '\nvar.put(%s, PyJsLvalTempHoisted)\n' % repr(
nested_name)
    #defs += '# Everything ready!\n'
# inline functions recovery
for nested_name, nested_info in inline.iteritems():
nested_block, nested_args = nested_info
new_code = translate_func(nested_name, nested_block, nested_args)
py_seed = inject_before_lval(py_seed,
nested_name.split('@')[0], new_code)
    # add hoisted definitions - they have literals that have to be recovered
py_seed = defs + py_seed
#Recover arrays
for arr_lval, arr_code in arrays.iteritems():
translation, obj_count, arr_count = translate_array(
arr_code, arr_lval, obj_count, arr_count)
py_seed = inject_before_lval(py_seed, arr_lval, translation)
#Recover objects
for obj_lval, obj_code in objects.iteritems():
translation, obj_count, arr_count = translate_object(
obj_code, obj_lval, obj_count, arr_count)
py_seed = inject_before_lval(py_seed, obj_lval, translation)
#Recover constants
py_code = recover_constants(py_seed, constants)
return top + py_code
def translate_func(name, block, args):
"""Translates functions and all nested functions to Python code.
name - name of that function (global functions will be available under var while
inline will be available directly under this name )
block - code of the function (*with* brackets {} )
args - arguments that this function takes"""
inline = name.startswith('PyJsLvalInline')
real_name = ''
if inline:
name, real_name = name.split('@')
arglist = ', '.join(args) + ', ' if args else ''
code = '@Js\ndef %s(%sthis, arguments, var=var):\n' % (name, arglist)
# register local variables
scope = "'this':this, 'arguments':arguments" #it will be a simple dictionary
for arg in args:
scope += ', %s:%s' % (repr(arg), arg)
if real_name:
scope += ', %s:%s' % (repr(real_name), name)
code += indent('var = Scope({%s}, var)\n' % scope)
block, nested_hoisted, nested_inline = remove_functions(block)
py_code, to_register = translate_flow(block)
#register variables declared with var and names of hoisted functions.
to_register += nested_hoisted.keys()
if to_register:
code += indent('var.registers(%s)\n' % str(to_register))
for nested_name, info in nested_hoisted.iteritems():
nested_block, nested_args = info
new_code = translate_func('PyJsLvalTempHoisted', nested_block,
nested_args)
# Now put definition of hoisted function on the top
code += indent(new_code)
code += indent(
'PyJsLvalTempHoisted.func_name = %s\n' % repr(nested_name))
code += indent(
'var.put(%s, PyJsLvalTempHoisted)\n' % repr(nested_name))
for nested_name, info in nested_inline.iteritems():
nested_block, nested_args = info
new_code = translate_func(nested_name, nested_block, nested_args)
# Inject definitions of inline functions just before usage
# nested inline names have this format : LVAL_NAME@REAL_NAME
py_code = inject_before_lval(py_code,
nested_name.split('@')[0], new_code)
if py_code.strip():
code += indent(py_code)
return code
set_func_translator(translate_func)
#print inject_before_lval(' chuj\n moj\n lval\nelse\n', 'lval', 'siema\njestem piter\n')
import time
#print time.time()
#print translate_js('if (1) console.log("Hello, World!"); else if (5) console.log("Hello world?");')
#print time.time()
t = """
var x = [1,2,3,4,5,6];
for (var e in x) {console.log(e); delete x[3];}
console.log(5 in [1,2,3,4,5]);
"""
SANDBOX = '''
import traceback
try:
%s
except:
print traceback.format_exc()
print
raw_input('Press Enter to quit')
'''
if __name__ == '__main__':
    # test with jq; if it works here, then it really works :)
#with open('jq.js', 'r') as f:
#jq = f.read()
#res = translate_js(jq)
res = translate_js(t)
dbg(SANDBOX % indent(res))
print 'Done'
| 38.75
| 105
| 0.662479
|
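A short usage sketch for the js2py legacy translator file above: translate_js takes JavaScript source text and returns equivalent Python source as a string, to be run against js2py's runtime. The import path below is an assumption based on the file's location, the snippet keeps to the Python 2 era of the module itself, and it only inspects the generated code rather than executing it.

# Minimal sketch; the import path is an assumption inferred from the file's repository location.
from js2py.legecy_translators.translator import translate_js

js_source = 'var x = 6 * 7; console.log(x);'
py_source = translate_js(js_source)  # returns the generated Python code as a string
print(py_source)  # inspect the translation instead of running it here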
2d65ffc2d0625f7f84bf4e16e749bccacf5a1d50
| 1,613
|
py
|
Python
|
umap/settings/__init__.py
|
sosm/umap
|
7893ff1c7d943b7fb71b74dbd8a3b47b7b104ca3
|
[
"WTFPL"
] | 1
|
2020-12-13T20:56:53.000Z
|
2020-12-13T20:56:53.000Z
|
umap/settings/__init__.py
|
sosm/umap
|
7893ff1c7d943b7fb71b74dbd8a3b47b7b104ca3
|
[
"WTFPL"
] | 1
|
2020-07-11T08:38:44.000Z
|
2020-07-11T08:38:44.000Z
|
umap/settings/__init__.py
|
sosm/umap
|
7893ff1c7d943b7fb71b74dbd8a3b47b7b104ca3
|
[
"WTFPL"
] | 3
|
2019-06-27T06:46:05.000Z
|
2019-12-12T09:46:12.000Z
|
import imp
import os
import sys
from django.utils.termcolors import colorize
from .base import * # NOQA, default values
# Allow overriding settings from any file, possibly outside the PYTHONPATH,
# to make it easier for non-Python people.
path = os.environ.get('UMAP_SETTINGS')
if not path:
# Retrocompat
path = os.path.join('/etc', 'umap', 'umap.conf')
if not os.path.exists(path):
# Retrocompat
path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'local.py')
if not os.path.exists(path):
msg = ('You must configure UMAP_SETTINGS or define '
'/etc/umap/umap.conf')
print(colorize(msg, fg='red'))
sys.exit(1)
d = imp.new_module('config')
d.__file__ = path
try:
with open(path) as config_file:
exec(compile(config_file.read(), path, 'exec'), d.__dict__)
except IOError as e:
msg = 'Unable to import {} from UMAP_SETTINGS'.format(path)
print(colorize(msg, fg='red'))
sys.exit(e)
else:
print('Loaded local config from', path)
for key in dir(d):
if key.isupper():
value = getattr(d, key)
if key.startswith('LEAFLET_STORAGE'):
# Retrocompat pre 1.0, remove me in 1.1.
globals()['UMAP' + key[15:]] = value
elif key == 'UMAP_CUSTOM_TEMPLATES':
globals()['TEMPLATES'][0]['DIRS'].insert(0, value)
elif key == 'UMAP_CUSTOM_STATICS':
globals()['STATICFILES_DIRS'].insert(0, value)
else:
globals()[key] = value
| 33.604167
| 72
| 0.584005
|
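A configuration sketch for the umap settings loader above: it executes the Python file named by UMAP_SETTINGS (falling back to /etc/umap/umap.conf or a local.py next to the settings package) and copies every upper-case name into Django settings. The file below is a hypothetical minimal override; the secret key, hostname, and suggested path are placeholders rather than umap defaults.

# Hypothetical /etc/umap/umap.conf -- plain Python; only upper-case names are picked up.
SECRET_KEY = "change-me"              # placeholder, set a real secret in production
DEBUG = False
ALLOWED_HOSTS = ["umap.example.org"]  # hypothetical hostname
# Any Django or umap setting can be overridden here (DATABASES, UMAP_CUSTOM_TEMPLATES, ...),
# then point the process at this file with: UMAP_SETTINGS=/etc/umap/umap.conf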