Dataset schema (one entry per column; ⌀ marks a nullable column):

- hexsha: stringlengths 40-40
- size: int64 1-1.03M
- ext: stringclasses 10 values
- lang: stringclasses 1 value
- max_stars_repo_path: stringlengths 3-239
- max_stars_repo_name: stringlengths 5-130
- max_stars_repo_head_hexsha: stringlengths 40-78
- max_stars_repo_licenses: listlengths 1-10
- max_stars_count: int64 1-191k ⌀
- max_stars_repo_stars_event_min_datetime: stringlengths 24-24 ⌀
- max_stars_repo_stars_event_max_datetime: stringlengths 24-24 ⌀
- max_issues_repo_path: stringlengths 3-239
- max_issues_repo_name: stringlengths 5-130
- max_issues_repo_head_hexsha: stringlengths 40-78
- max_issues_repo_licenses: listlengths 1-10
- max_issues_count: int64 1-67k ⌀
- max_issues_repo_issues_event_min_datetime: stringlengths 24-24 ⌀
- max_issues_repo_issues_event_max_datetime: stringlengths 24-24 ⌀
- max_forks_repo_path: stringlengths 3-239
- max_forks_repo_name: stringlengths 5-130
- max_forks_repo_head_hexsha: stringlengths 40-78
- max_forks_repo_licenses: listlengths 1-10
- max_forks_count: int64 1-105k ⌀
- max_forks_repo_forks_event_min_datetime: stringlengths 24-24 ⌀
- max_forks_repo_forks_event_max_datetime: stringlengths 24-24 ⌀
- content: stringlengths 1-1.03M
- avg_line_length: float64 1-958k
- max_line_length: int64 1-1.03M
- alphanum_fraction: float64 0-1

Each record below gives the metadata columns in this order on one row, followed by the file content and a closing row of avg_line_length | max_line_length | alphanum_fraction.
| 794e6076d0019b30f164eb0177f2c860a5934b77 | 2,137 | py | Python | ros2_control_bolt_bringup/launch/bolt_system_position_only_gazebo.launch.py | Benjamin-Amsellem/ros2_control_bolt | ae91f30826eddf2bed7cd5b69f6bab12c6b7dcc8 | ["Apache-2.0"] | null | null | null | ros2_control_bolt_bringup/launch/bolt_system_position_only_gazebo.launch.py | Benjamin-Amsellem/ros2_control_bolt | ae91f30826eddf2bed7cd5b69f6bab12c6b7dcc8 | ["Apache-2.0"] | null | null | null | ros2_control_bolt_bringup/launch/bolt_system_position_only_gazebo.launch.py | Benjamin-Amsellem/ros2_control_bolt | ae91f30826eddf2bed7cd5b69f6bab12c6b7dcc8 | ["Apache-2.0"] | null | null | null |
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.substitutions import Command, FindExecutable, PathJoinSubstitution
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
from launch_ros.substitutions import FindPackageShare
def generate_launch_description():
gazebo = IncludeLaunchDescription(
PythonLaunchDescriptionSource(
[PathJoinSubstitution([FindPackageShare("gazebo_ros"), "launch", "gazebo.launch.py"])]
),
launch_arguments={"verbose": "false", "pause": "true"}.items(),
)
# Get URDF via xacro
robot_description_content = Command(
[
PathJoinSubstitution([FindExecutable(name="xacro")]),
" ",
PathJoinSubstitution(
[
FindPackageShare("ros2_description_bolt"),
"urdf",
"system_bolt_description.urdf.xacro",
]
),
" use_sim:=true",
]
)
robot_description = {"robot_description": robot_description_content}
node_robot_state_publisher = Node(
package="robot_state_publisher",
executable="robot_state_publisher",
output="screen",
parameters=[robot_description],
)
spawn_entity = Node(
package="gazebo_ros",
executable="spawn_entity.py",
arguments=["-topic", "robot_description", "-entity", "bolt", "-x 0", "-y 0", "-z 0.5"],
output="screen",
)
spawn_controller = Node(
package="controller_manager",
executable="spawner.py",
arguments=["joint_state_broadcaster"],
output="screen",
)
spawn_controller_effort = Node(
package="controller_manager",
executable="spawner.py",
arguments=["effort_controllers"],
output="screen",
)
return LaunchDescription(
[
gazebo,
node_robot_state_publisher,
spawn_entity,
spawn_controller,
spawn_controller_effort,
]
)
| 30.528571 | 98 | 0.617688 |
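As a hedged usage sketch (not part of the record above): another package's launch file can pull this one in with the same `IncludeLaunchDescription` pattern the file itself uses for Gazebo. The package and file names come from the record; everything else is standard `launch` API.

```python
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import PathJoinSubstitution
from launch_ros.substitutions import FindPackageShare


def generate_launch_description():
    # Include the Bolt Gazebo bringup from its installed share directory
    bolt_gazebo = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(
            [PathJoinSubstitution([
                FindPackageShare("ros2_control_bolt_bringup"),
                "launch",
                "bolt_system_position_only_gazebo.launch.py",
            ])]
        )
    )
    return LaunchDescription([bolt_gazebo])
```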
| 794e60b6305209053b335fa8f0743658bae15d6f | 355 | py | Python | laia/losses/dortmund_bce_loss.py | basbeu/PyLaia | d14458484b56622204b1730a7d53220c5d0f1bc1 | ["MIT"] | 2 | 2020-09-10T13:31:17.000Z | 2021-07-31T09:44:17.000Z | laia/losses/dortmund_bce_loss.py | basbeu/PyLaia | d14458484b56622204b1730a7d53220c5d0f1bc1 | ["MIT"] | 1 | 2020-12-06T18:11:52.000Z | 2020-12-06T18:19:38.000Z | laia/losses/dortmund_bce_loss.py | basbeu/PyLaia | d14458484b56622204b1730a7d53220c5d0f1bc1 | ["MIT"] | 2 | 2020-04-20T13:40:56.000Z | 2020-10-17T11:59:55.000Z |
from __future__ import absolute_import
from torch.nn import BCEWithLogitsLoss
class DortmundBCELoss(BCEWithLogitsLoss):
def __init__(self):
super(DortmundBCELoss, self).__init__(reduction="sum")
def forward(self, output, target):
loss = super(DortmundBCELoss, self).forward(output, target)
return loss / output.size(0)
| 27.307692 | 67 | 0.729577 |
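A hedged usage sketch for the loss above (the tensor shapes are illustrative assumptions, not taken from PyLaia): the class is sum-reduced `BCEWithLogitsLoss` normalized by batch size rather than by element count.

```python
import torch

loss_fn = DortmundBCELoss()
logits = torch.randn(4, 10)                     # batch of 4, 10 attributes
targets = torch.randint(0, 2, (4, 10)).float()  # binary targets
loss = loss_fn(logits, targets)                 # summed BCE divided by 4
print(loss.item())
```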
| 794e618cfb1c109eabf3bdac9d8307e9180cc8ab | 1,425 | py | Python | parent/restcache/test-server/resttest/resttest/wsgi.py | kinokocchi/Ttada | 632311375bb4b5a629a5455dea677c3fd69d873a | ["MIT"] | null | null | null | parent/restcache/test-server/resttest/resttest/wsgi.py | kinokocchi/Ttada | 632311375bb4b5a629a5455dea677c3fd69d873a | ["MIT"] | null | null | null | parent/restcache/test-server/resttest/resttest/wsgi.py | kinokocchi/Ttada | 632311375bb4b5a629a5455dea677c3fd69d873a | ["MIT"] | null | null | null |
"""
WSGI config for resttest project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "resttest.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "resttest.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 43.181818 | 79 | 0.803509 |
| 794e624fdcb29d0663882dac712031dc1c41ad20 | 6,675 | py | Python | python/sklearn/sklearn/dummy.py | seckcoder/lang-learn | 1e0d6f412bbd7f89b1af00293fd907ddb3c1b571 | ["Unlicense"] | 1 | 2017-10-14T04:23:45.000Z | 2017-10-14T04:23:45.000Z | python/sklearn/sklearn/dummy.py | seckcoder/lang-learn | 1e0d6f412bbd7f89b1af00293fd907ddb3c1b571 | ["Unlicense"] | null | null | null | python/sklearn/sklearn/dummy.py | seckcoder/lang-learn | 1e0d6f412bbd7f89b1af00293fd907ddb3c1b571 | ["Unlicense"] | null | null | null |
# Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD Style.
import numpy as np
from .base import BaseEstimator, ClassifierMixin, RegressorMixin
from .utils import check_random_state
from .utils.fixes import unique
from .utils.validation import safe_asarray
class DummyClassifier(BaseEstimator, ClassifierMixin):
"""
DummyClassifier is a classifier that makes predictions using simple rules.
This classifier is useful as a simple baseline to compare with other
(real) classifiers. Do not use it for real problems.
Parameters
----------
strategy: str
Strategy to use to generate predictions.
* "stratified": generates predictions by respecting the training
set's class distribution.
* "most_frequent": always predicts the most frequent label in the
training set.
* "uniform": generates predictions uniformly at random.
random_state: int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use.
Attributes
----------
`classes_` : array, shape = [n_classes]
Class labels.
`class_prior_` : array, shape = [n_classes]
Probability of each class.
"""
def __init__(self, strategy="stratified", random_state=None):
self.strategy = strategy
self.random_state = random_state
def fit(self, X, y):
"""Fit the random classifier.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("most_frequent", "stratified", "uniform"):
raise ValueError("Unknown strategy type.")
self.classes_, y = unique(y, return_inverse=True)
self.class_prior_ = np.bincount(y) / float(y.shape[0])
return self
def predict(self, X):
"""
Perform classification on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples]
Predicted target values for X.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = safe_asarray(X)
n_samples = X.shape[0]
rs = check_random_state(self.random_state)
if self.strategy == "most_frequent":
ret = np.ones(n_samples, dtype=int) * self.class_prior_.argmax()
elif self.strategy == "stratified":
ret = self.predict_proba(X).argmax(axis=1)
elif self.strategy == "uniform":
ret = rs.randint(len(self.classes_), size=n_samples)
return self.classes_[ret]
def predict_proba(self, X):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
P : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = safe_asarray(X)
n_samples = X.shape[0]
n_classes = len(self.classes_)
rs = check_random_state(self.random_state)
if self.strategy == "most_frequent":
ind = np.ones(n_samples, dtype=int) * self.class_prior_.argmax()
out = np.zeros((n_samples, n_classes), dtype=np.float64)
out[:, ind] = 1.0
elif self.strategy == "stratified":
out = rs.multinomial(1, self.class_prior_, size=n_samples)
elif self.strategy == "uniform":
out = np.ones((n_samples, n_classes), dtype=np.float64)
out /= n_classes
return out
def predict_log_proba(self, X):
"""
Return log probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
P : array-like, shape = [n_samples, n_classes]
Returns the log probability of the sample for each class in
the model, where classes are ordered arithmetically.
"""
return np.log(self.predict_proba(X))
class DummyRegressor(BaseEstimator, RegressorMixin):
"""
DummyRegressor is a regressor that always predicts the mean of the training
targets.
This regressor is useful as a simple baseline to compare with other
(real) regressors. Do not use it for real problems.
Attributes
----------
`y_mean_` : float
Mean of the training targets.
"""
def fit(self, X, y):
"""Fit the random regressor.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns self.
"""
self.y_mean_ = np.mean(y)
return self
def predict(self, X):
"""
        Perform regression on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples]
Predicted target values for X.
"""
if not hasattr(self, "y_mean_"):
raise ValueError("DummyRegressor not fitted.")
X = safe_asarray(X)
n_samples = X.shape[0]
return np.ones(n_samples) * self.y_mean_
| 31.485849 | 79 | 0.594457 |
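A hedged usage sketch for the two estimators above (toy data is illustrative, and it assumes the module's relative imports resolve inside its package): both estimators ignore the input features entirely, which is what makes them useful baselines.

```python
import numpy as np

X = np.zeros((6, 2))               # features are ignored by both estimators
y = np.array([0, 0, 0, 1, 1, 2])

clf = DummyClassifier(strategy="most_frequent").fit(X, y)
print(clf.predict(X))              # always the majority class (0)
print(clf.class_prior_)            # [0.5, 0.333..., 0.166...]

reg = DummyRegressor().fit(X, y.astype(float))
print(reg.predict(X))              # constant prediction: the mean of y
```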
| 794e62e4aafc8c2ec393b466096743dcc1ab0ac4 | 5,482 | py | Python | qiskit_metal/qlibrary/lumped/resonator_coil_rect.py | wdczdj/qiskit-metal | c77805f66da60021ef8d10d668715c1dc2ebcd1d | ["Apache-2.0"] | null | null | null | qiskit_metal/qlibrary/lumped/resonator_coil_rect.py | wdczdj/qiskit-metal | c77805f66da60021ef8d10d668715c1dc2ebcd1d | ["Apache-2.0"] | null | null | null | qiskit_metal/qlibrary/lumped/resonator_coil_rect.py | wdczdj/qiskit-metal | c77805f66da60021ef8d10d668715c1dc2ebcd1d | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""File contains dictionary for NSquareSpiral and the make()."""
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core import QComponent
import numpy as np
class ResonatorCoilRect(QComponent):
"""A rectnagle spiral resonator based on length input. The X dimension is
modified by the code based on the total length inputed.
Inherits `QComponent` class
A rectangular spiral resonator. The width of the spiral is modified based on inputted values
and given total length of the spiral.
::
<--------X-------->
__________________
| ___________ |
| | | |
| | | |
| |______________|
|
Default Options:
Convention: Values (unless noted) are strings with units included,
(e.g., '30um')
* n: '3' -- Number of turns of the spiral
* length: '2000um' -- Total length of the spiral
* line_width: '1um' -- The width of the line of the spiral
* height: '40um' -- The height of the inner portion of the spiral
* gap: '4um' -- The distance between each layer of the spiral
* coupler_distance: '10um' -- The pin position from the grounded termination of the spiral
* pos_x: '0um' -- The x position of the ground termination.
* pos_y: '0um' -- The y position of the ground termination.
* rotation: '0' -- The direction of the termination. 0 degrees is +x, following a
counter-clockwise rotation (eg. 90 is +y)
* chip: 'main' -- The chip the pin should be on.
* layer: '1' -- Layer the pin is on. Does not have any practical impact to the short.
"""
component_metadata = Dict(short_name='res')
"""Component metadata"""
default_options = Dict(n='3',
length='2000um',
line_width='1um',
height='40um',
gap='4um',
coupler_distance='10um',
pos_x='0um',
pos_y='0um',
rotation='0',
chip='main',
layer='1')
"""Default drawing options"""
def make(self):
"""The make function implements the logic that creates the geoemtry
(poly, path, etc.) from the qcomponent.options dictionary of
parameters, and the adds them to the design, using
qcomponent.add_qgeometry(...), adding in extra needed information, such
as layer, subtract, etc."""
p = self.p # p for parsed parameters. Access to the parsed options.
n = int(p.n)
# Create the geometry
spiral_list = []
        # Formula to determine the size of the spiral based on the input length.
x_n = (p.length / (2 * n)) - (p.height + 2 * (p.gap + p.line_width) *
(2 * n - 1))
if x_n <= p.gap + p.line_width:
            self._error_message = 'Input values result in the width of the spiral being too small.'
self.logger.warning(self._error_message)
return
for step in range(n):
x_point = x_n / 2 + step * (p.line_width + p.gap)
y_point = p.height / 2 + step * (p.line_width + p.gap)
spiral_list.append((-x_point, -y_point))
spiral_list.append((x_point, -y_point))
spiral_list.append((x_point, y_point))
spiral_list.append((-x_point - (p.line_width + p.gap), y_point))
x_point = (x_n / 2 + (step + 1) * (p.line_width + p.gap))
y_point = (p.height / 2 + (step + 1) * (p.line_width + p.gap) -
p.line_width / 2)
spiral_list.append((-x_point, -y_point))
spiral_list = draw.LineString(spiral_list)
spiral_etch = draw.shapely.geometry.box(
-(x_point + p.line_width / 2 + p.gap), -y_point,
x_point - p.line_width / 2, y_point)
#Generates a linestring to track port location
points = draw.LineString([
(-x_point + p.line_width / 2, -y_point + p.coupler_distance),
(-x_point - p.line_width / 2, -y_point + p.coupler_distance)
])
c_items = [spiral_list, spiral_etch, points]
c_items = draw.rotate(c_items, p.rotation, origin=(0, 0))
c_items = draw.translate(c_items, p.pos_x, p.pos_y)
[spiral_list, spiral_etch, points] = c_items
##############################################
# add elements
self.add_qgeometry('path', {'n_spiral': spiral_list},
width=p.line_width)
self.add_qgeometry('poly', {'n_spira_etch': spiral_etch}, subtract=True)
# NEW PIN SPOT
self.add_pin('spiralPin',
points=np.array(points.coords),
width=p.line_width,
input_as_norm=True)
| 41.218045 | 104 | 0.568588 |
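A hedged usage sketch (the design object and the option overrides are assumptions following the usual qiskit-metal pattern, not anything in this record):

```python
from qiskit_metal import designs

design = designs.DesignPlanar()
res = ResonatorCoilRect(design, 'readout_res',
                        options=dict(n='4', length='3000um', pos_x='1mm'))
design.rebuild()   # re-runs make(), regenerating the component qgeometry
```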
| 794e63dc8c774e769d71067b3c58e691d118ade4 | 5,347 | py | Python | models/sngan_cifar10.py | sudarshanregmi/ICRGAN-and-SSGAN | c9e7b01d89cba19505e566892a678932717b8039 | ["MIT"] | 3 | 2021-02-03T17:19:33.000Z | 2021-02-03T17:22:14.000Z | models/sngan_cifar10.py | sudarshanregmi/ICRGAN-and-SSGAN | c9e7b01d89cba19505e566892a678932717b8039 | ["MIT"] | null | null | null | models/sngan_cifar10.py | sudarshanregmi/ICRGAN-and-SSGAN | c9e7b01d89cba19505e566892a678932717b8039 | ["MIT"] | null | null | null |
import torch.nn as nn
from .gen_resblock import GenBlock
class Generator(nn.Module):
def __init__(self, args, activation=nn.ReLU(), n_classes=0):
super(Generator, self).__init__()
self.bottom_width = args.bottom_width
self.activation = activation
self.n_classes = n_classes
self.ch = args.gf_dim
self.l1 = nn.Linear(args.latent_dim, (self.bottom_width ** 2) * self.ch)
self.block2 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.block3 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.block4 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.b5 = nn.BatchNorm2d(self.ch)
self.c5 = nn.Conv2d(self.ch, 3, kernel_size=3, stride=1, padding=1)
def forward(self, z):
h = z
h = self.l1(h).view(-1, self.ch, self.bottom_width, self.bottom_width)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.b5(h)
h = self.activation(h)
h = nn.Tanh()(self.c5(h))
return h
"""Discriminator"""
def _downsample(x):
    # Downsample (average pooling with a 2x2 kernel)
return nn.AvgPool2d(kernel_size=2)(x)
class OptimizedDisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, ksize=3, pad=1, activation=nn.ReLU()):
super(OptimizedDisBlock, self).__init__()
self.activation = activation
self.c1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(out_channels, out_channels, kernel_size=ksize, padding=pad)
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
h = _downsample(h)
return h
def shortcut(self, x):
return self.c_sc(_downsample(x))
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class DisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
activation=nn.ReLU(), downsample=False):
super(DisBlock, self).__init__()
self.activation = activation
self.downsample = downsample
self.learnable_sc = (in_channels != out_channels) or downsample
hidden_channels = in_channels if hidden_channels is None else hidden_channels
self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
if self.learnable_sc:
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
if self.downsample:
h = _downsample(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.downsample:
return _downsample(x)
else:
return x
else:
return x
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class Discriminator(nn.Module):
def __init__(self, args, activation=nn.ReLU(), ssup=False):
super(Discriminator, self).__init__()
self.ch = args.df_dim
self.activation = activation
self.ssup = ssup
self.block1 = OptimizedDisBlock(args, 3, self.ch)
self.block2 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=True)
self.block3 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
self.block4 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
        self.softmax = nn.Softmax(dim=1)
if ssup:
self.fully_connect_rot = nn.Linear(self.ch, 4, bias=False)
self.fully_connect_gan = nn.Linear(self.ch, 1, bias=False)
if args.d_spectral_norm:
self.fully_connect_gan = nn.utils.spectral_norm(self.fully_connect_gan)
if ssup:
self.fully_connect_rot = nn.utils.spectral_norm(self.fully_connect_rot)
def forward(self, x):
h = x
h = self.block1(h)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.activation(h)
        # GAN logits after global (sum) pooling over the spatial dimensions
        h = h.sum(2).sum(2)
gan_logits = self.fully_connect_gan(h)
rot_logits, rot_prob = -1, -1
if self.ssup:
rot_logits = self.fully_connect_rot(h)
rot_prob = self.softmax(rot_logits)
return gan_logits, rot_logits, rot_prob
| 35.885906 | 107 | 0.620909 |
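A hedged smoke-test sketch for the module above; the `args` namespace mimics the hyperparameters the classes read (field names inferred from the code, values illustrative), and `GenBlock` must be importable from the same package.

```python
from types import SimpleNamespace

import torch

args = SimpleNamespace(bottom_width=4, gf_dim=256, latent_dim=128,
                       df_dim=128, d_spectral_norm=True)
gen = Generator(args)
dis = Discriminator(args, ssup=True)

z = torch.randn(2, args.latent_dim)
fake = gen(z)                                 # (2, 3, 32, 32): 4 -> 8 -> 16 -> 32
gan_logits, rot_logits, rot_prob = dis(fake)  # (2, 1), (2, 4), (2, 4)
print(fake.shape, gan_logits.shape, rot_logits.shape)
```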
| 794e64393602b4f9f160790fdb9ea04191a0e53a | 1,454 | py | Python | src/resource-graph/azext_resourcegraph/vendored_sdks/resourcegraph/models/error_py3.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | ["MIT"] | 1 | 2019-05-10T19:58:09.000Z | 2019-05-10T19:58:09.000Z | src/resource-graph/azext_resourcegraph/vendored_sdks/resourcegraph/models/error_py3.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | ["MIT"] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | src/resource-graph/azext_resourcegraph/vendored_sdks/resourcegraph/models/error_py3.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | ["MIT"] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Error(Model):
"""Error info.
Error details.
All required parameters must be populated in order to send to Azure.
:param code: Required. Error code identifying the specific error.
:type code: str
:param message: Required. A human readable error message.
:type message: str
:param details: Error details
:type details: list[~azure.mgmt.resourcegraph.models.ErrorDetails]
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetails]'},
}
def __init__(self, *, code: str, message: str, details=None, **kwargs) -> None:
super(Error, self).__init__(**kwargs)
self.code = code
self.message = message
self.details = details
| 31.608696 | 83 | 0.577029 |
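A hedged usage sketch: `msrest.serialization.Model` supplies `serialize()`, so the generated class round-trips to the wire format without extra code (the code and message values are illustrative).

```python
err = Error(code="BadRequest", message="Query is malformed.")
print(err.serialize())
# {'code': 'BadRequest', 'message': 'Query is malformed.'}
```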
| 794e650a8399ef5a01f6e253d3e63167cbde8df1 | 9,148 | py | Python | protocol/sensor_msgs/msg/not used/_MagneticField.py | Tsinghua-OpenICV/carla_icv_bridge | 4d5f8c26b1847dbb16a81fe43f146bf4a9a8da5e | ["MIT"] | null | null | null | protocol/sensor_msgs/msg/not used/_MagneticField.py | Tsinghua-OpenICV/carla_icv_bridge | 4d5f8c26b1847dbb16a81fe43f146bf4a9a8da5e | ["MIT"] | null | null | null | protocol/sensor_msgs/msg/not used/_MagneticField.py | Tsinghua-OpenICV/carla_icv_bridge | 4d5f8c26b1847dbb16a81fe43f146bf4a9a8da5e | ["MIT"] | 1 | 2020-12-19T05:48:01.000Z | 2020-12-19T05:48:01.000Z |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from sensor_msgs/MagneticField.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import protocol.std_msgs.msg as std_msgs
class MagneticField(genpy.Message):
_md5sum = "2f3b0b43eed0c9501de0fa3ff89a45aa"
_type = "sensor_msgs/MagneticField"
_has_header = True #flag to mark the presence of a Header object
_full_text = """ # Measurement of the Magnetic Field vector at a specific location.
# If the covariance of the measurement is known, it should be filled in
# (if all you know is the variance of each measurement, e.g. from the datasheet,
#just put those along the diagonal)
# A covariance matrix of all zeros will be interpreted as "covariance unknown",
# and to use the data a covariance will have to be assumed or gotten from some
# other source
Header header # timestamp is the time the
# field was measured
# frame_id is the location and orientation
# of the field measurement
geometry_msgs/Vector3 magnetic_field # x, y, and z components of the
# field vector in Tesla
# If your sensor does not output 3 axes,
# put NaNs in the components not reported.
float64[9] magnetic_field_covariance # Row major about x, y, z axes
# 0 is interpreted as variance unknown
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/Vector3
# This represents a vector in free space.
# It is only meant to represent a direction. Therefore, it does not
# make sense to apply a translation to it (e.g., when applying a
# generic rigid transformation to a Vector3, tf2 will only apply the
# rotation). If you want your data to be translatable too, use the
# geometry_msgs/Point message instead.
float64 x
float64 y
float64 z"""
__slots__ = ['header','magnetic_field','magnetic_field_covariance']
_slot_types = ['std_msgs/Header','geometry_msgs/Vector3','float64[9]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommended
        use is keyword arguments, as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,magnetic_field,magnetic_field_covariance
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(MagneticField, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.Header()
if self.magnetic_field is None:
self.magnetic_field = geometry_msgs.msg.Vector3()
if self.magnetic_field_covariance is None:
self.magnetic_field_covariance = [0.] * 9
else:
self.header = std_msgs.Header()
self.magnetic_field = geometry_msgs.msg.Vector3()
self.magnetic_field_covariance = [0.] * 9
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_3d().pack(_x.magnetic_field.x, _x.magnetic_field.y, _x.magnetic_field.z))
buff.write(_get_struct_9d().pack(*self.magnetic_field_covariance))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.Header()
if self.magnetic_field is None:
self.magnetic_field = geometry_msgs.msg.Vector3()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 24
(_x.magnetic_field.x, _x.magnetic_field.y, _x.magnetic_field.z,) = _get_struct_3d().unpack(str[start:end])
start = end
end += 72
self.magnetic_field_covariance = _get_struct_9d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_3d().pack(_x.magnetic_field.x, _x.magnetic_field.y, _x.magnetic_field.z))
buff.write(self.magnetic_field_covariance.tostring())
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.Header()
if self.magnetic_field is None:
self.magnetic_field = geometry_msgs.msg.Vector3()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 24
(_x.magnetic_field.x, _x.magnetic_field.y, _x.magnetic_field.z,) = _get_struct_3d().unpack(str[start:end])
start = end
end += 72
self.magnetic_field_covariance = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_9d = None
def _get_struct_9d():
global _struct_9d
if _struct_9d is None:
_struct_9d = struct.Struct("<9d")
return _struct_9d
_struct_3d = None
def _get_struct_3d():
global _struct_3d
if _struct_3d is None:
_struct_3d = struct.Struct("<3d")
return _struct_3d
| 38.276151 | 145 | 0.648885 |
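A hedged round-trip sketch for the generated message above; it assumes the repo's `protocol` package layout is importable so the `std_msgs` Header resolves.

```python
from io import BytesIO

msg = MagneticField()
msg.header.frame_id = 'imu_link'
msg.magnetic_field.x = 2.5e-5      # Tesla

buff = BytesIO()
msg.serialize(buff)                # packs header, vector, 9-element covariance

copy = MagneticField()
copy.deserialize(buff.getvalue())
assert copy.header.frame_id == 'imu_link'
```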
| 794e66062d08af1fd6a8ef151ddab59a3435da38 | 1,359 | py | Python | functions/QBSM.py | Yuleii/yulei-thesis-QBSM-kw94 | bb882bc6c809331c370a4d6442c36ad67ccad498 | ["MIT"] | null | null | null | functions/QBSM.py | Yuleii/yulei-thesis-QBSM-kw94 | bb882bc6c809331c370a4d6442c36ad67ccad498 | ["MIT"] | null | null | null | functions/QBSM.py | Yuleii/yulei-thesis-QBSM-kw94 | bb882bc6c809331c370a4d6442c36ad67ccad498 | ["MIT"] | null | null | null |
"""Functions that compute quantile-based sensitivity measures."""
import numpy as np
def quantile_measures(quantile_y_x, quantile_y_x_mix):
"""Estimate the values of quantile based measures."""
m, n_params, len_alp = quantile_y_x_mix.shape[:3]
# initialization
q_1 = np.zeros((len_alp, n_params))
q_2 = np.zeros((len_alp, n_params))
delt = np.zeros((m, n_params, len_alp, 1))
for j in range(m):
for i in range(n_params):
for pp in range(len_alp):
delt[j, i, pp] = quantile_y_x_mix[j, i, pp] - quantile_y_x[pp]
q_1[pp, i] = np.mean(np.absolute(delt[:, i, pp]))
q_2[pp, i] = np.mean(delt[:, i, pp] ** 2)
return q_1, q_2
def normalized_quantile_measures(q_1, q_2):
"""Estimate the values of normalized quantile based measures."""
len_alp, n_params = q_1.shape
# initialization
sum_q_1 = np.zeros(len_alp)
sum_q_2 = np.zeros(len_alp)
norm_q_1 = np.zeros((len_alp, n_params))
norm_q_2 = np.zeros((len_alp, n_params))
# Equation 13 & 14
for pp in range(len_alp):
sum_q_1[pp] = np.sum(q_1[pp, :])
sum_q_2[pp] = np.sum(q_2[pp, :])
for i in range(n_params):
norm_q_1[pp, i] = q_1[pp, i] / sum_q_1[pp]
norm_q_2[pp, i] = q_2[pp, i] / sum_q_2[pp]
return norm_q_1, norm_q_2
| 31.604651 | 78 | 0.6078 |
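A hedged usage sketch with synthetic quantiles; the array shapes follow the indexing in the code above: `quantile_y_x` is `(len_alp,)` and `quantile_y_x_mix` is `(m, n_params, len_alp, 1)`.

```python
import numpy as np

m, n_params, len_alp = 50, 3, 9
rng = np.random.default_rng(0)
quantile_y_x = rng.normal(size=len_alp)
quantile_y_x_mix = rng.normal(size=(m, n_params, len_alp, 1))

q_1, q_2 = quantile_measures(quantile_y_x, quantile_y_x_mix)
norm_q_1, norm_q_2 = normalized_quantile_measures(q_1, q_2)
print(norm_q_1.sum(axis=1))   # each row sums to 1 by construction
```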
| 794e67a16253ecf691a0d0784f21a801c2ec61cb | 210 | py | Python | 1_estrutura_sequencial/06_area_circulo.py | cecilmalone/lista_de_exercicios_pybr | 6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5 | ["MIT"] | null | null | null | 1_estrutura_sequencial/06_area_circulo.py | cecilmalone/lista_de_exercicios_pybr | 6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5 | ["MIT"] | null | null | null | 1_estrutura_sequencial/06_area_circulo.py | cecilmalone/lista_de_exercicios_pybr | 6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5 | ["MIT"] | null | null | null |
"""
6. Faça um Programa que peça o raio de um círculo, calcule e mostre sua área.
"""
raio = int(input('Informe o raio do círculo: '))
area = 3.14 * (raio ** 2)
print('A área do círculo é: {}'.format(area))
| 21 | 77 | 0.638095 |
| 794e689b7c0116cee4078be9f755ca5e2adcdfa4 | 1,809 | py | Python | parse.py | logpacket/sun-rin_geupsic | 333f149525f6100869f3123313c8635ef4cddb7e | ["MIT"] | null | null | null | parse.py | logpacket/sun-rin_geupsic | 333f149525f6100869f3123313c8635ef4cddb7e | ["MIT"] | null | null | null | parse.py | logpacket/sun-rin_geupsic | 333f149525f6100869f3123313c8635ef4cddb7e | ["MIT"] | null | null | null |
#-*- coding: utf-8 -*-
import sys
import requests
from bs4 import BeautifulSoup
from datetime import datetime
reload(sys)
sys.setdefaultencoding('utf8')
key = '"keyboard":{"type":"buttons", "buttons":["오늘의 급식", "내일의 급식"]}}'
output_date = []
def create_form(menu):
menu = menu.replace('\n', '')
menu = menu.replace('\r', '')
menu = menu.replace('\t', '')
menu = menu.replace(' ', '')
menu = menu.replace(',', '\\n')
menu = menu.replace('(', '\\n')
menu = menu.replace(')', '')
message = '{"message":{"text":' + '\"'+ menu + '\"' + '}' + ',' + key
return message
def check_day():
date = soup.select('dl > dd > p.date')
to_day = datetime.today()
to_day = str(to_day)
to_day = to_day[8:10]
date_list = []
for day in date:
output_date.append(day.text)
date_list.append(day.text[8:10])
    if to_day in date_list:
        return 1
    else:
today.write('{"message":{"text":' + '\"' + '오늘 급식이 없습니다!' + '\"' + '}' + ',' + key)
to_day = int(to_day)
to_day += 1
tomorrow_day = str(to_day)
if tomorrow_day != date_list[0]:
tomorrow.write('{"message":{"text":' + '\"' + '내일 급식이 없습니다!' + '\"' + '}' + ',' + key)
sys.exit()
        else:
tomorrow.write(create_form(output_date[0] +'\\n\\n'+ menu[0].text))
sys.exit()
today = open("/home/packet/sun-rin_geupsic/today", "w")
tomorrow = open("/home/packet/sun-rin_geupsic/tomorrow", "w")
req = requests.get('http://www.sunrint.hs.kr/index.do')
html = req.text
soup = BeautifulSoup(html, 'html.parser')
menu = soup.select(' dl > dd > p.menu')
check_day()
today.write(create_form(output_date[0] + '\\n\\n' + menu[0].text))
tomorrow.write(create_form(output_date[1] + '\\n\\n' + menu[1].text))
| 32.890909 | 98 | 0.557767 |
| 794e69386fa7a400922c65085d72d1fd8fe0b2e6 | 1,325 | py | Python | aiida/tools/data/array/trajectory.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | ["BSD-2-Clause"] | 1 | 2019-07-31T04:08:13.000Z | 2019-07-31T04:08:13.000Z | aiida/tools/data/array/trajectory.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | ["BSD-2-Clause"] | null | null | null | aiida/tools/data/array/trajectory.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tools to operate on `TrajectoryData` nodes."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from aiida.engine import calcfunction
@calcfunction
def _get_aiida_structure_inline(trajectory, parameters):
"""
Creates :py:class:`aiida.orm.nodes.data.structure.StructureData` using ASE.
.. note:: requires ASE module.
"""
kwargs = {}
if parameters is not None:
kwargs = parameters.get_dict()
if 'index' not in kwargs.keys() or kwargs['index'] is None:
raise ValueError("Step index is not supplied for TrajectoryData")
return {'structure': trajectory.get_step_structure(**kwargs)}
| 42.741935 | 79 | 0.556981 |
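A hedged usage sketch; it assumes a configured AiiDA profile and an existing stored `TrajectoryData` node (the pk is illustrative).

```python
from aiida import load_profile
from aiida.orm import Dict, load_node

load_profile()
trajectory = load_node(1234)            # an existing TrajectoryData node
parameters = Dict(dict={'index': 0})    # which trajectory step to extract
result = _get_aiida_structure_inline(trajectory, parameters)
structure = result['structure']         # StructureData for step 0
```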
| 794e6a61a34f4dbaa3ee669bc7173eb7fe760c24 | 2,877 | py | Python | src/binwalk/plugins/lzmamod.py | zhao07/binwalk | 99d814a04b183d9f19d8425189a1c20ec8d64d5a | ["MIT"] | 1 | 2015-05-04T21:08:20.000Z | 2015-05-04T21:08:20.000Z | src/binwalk/plugins/lzmamod.py | zhao07/binwalk | 99d814a04b183d9f19d8425189a1c20ec8d64d5a | ["MIT"] | null | null | null | src/binwalk/plugins/lzmamod.py | zhao07/binwalk | 99d814a04b183d9f19d8425189a1c20ec8d64d5a | ["MIT"] | null | null | null |
import os
import shutil
import binwalk.core.plugin
from binwalk.core.compat import *
from binwalk.core.common import BlockFile
class LZMAModPlugin(binwalk.core.plugin.Plugin):
'''
Finds and extracts modified LZMA files commonly found in cable modems.
Based on Bernardo Rodrigues' work: http://w00tsec.blogspot.com/2013/11/unpacking-firmware-images-from-cable.html
'''
MODULES = ['Signature']
FAKE_LZMA_SIZE = "\x00\x00\x00\x10\x00\x00\x00\x00"
SIGNATURE = "lzma compressed data"
def init(self):
self.original_cmd = ''
# Replace the existing LZMA extraction command with our own
# Note that this assumes that there is *one* LZMA extraction command...
rules = self.module.extractor.get_rules()
for i in range(0, len(rules)):
if rules[i]['regex'].match(self.SIGNATURE) and rules[i]['cmd']:
self.original_cmd = rules[i]['cmd']
rules[i]['cmd'] = self.lzma_cable_extractor
break
def lzma_cable_extractor(self, fname):
# Try extracting the LZMA file without modification first
result = self.module.extractor.execute(self.original_cmd, fname)
        # If the external extractor was successful (True) or didn't exist (None), don't do anything.
if result not in [True, None]:
out_name = os.path.splitext(fname)[0] + '-patched' + os.path.splitext(fname)[1]
fp_out = BlockFile(out_name, 'w')
# Use self.module.config.open_file here to ensure that other config settings (such as byte-swapping) are honored
fp_in = self.module.config.open_file(fname, offset=0, length=0)
fp_in.set_block_size(peek=0)
i = 0
while i < fp_in.length:
(data, dlen) = fp_in.read_block()
if i == 0:
out_data = data[0:5] + self.FAKE_LZMA_SIZE + data[5:]
else:
out_data = data
fp_out.write(out_data)
i += dlen
fp_in.close()
fp_out.close()
# Overwrite the original file so that it can be cleaned up if -r was specified
shutil.move(out_name, fname)
self.module.extractor.execute(self.original_cmd, fname)
def scan(self, result):
# The modified cable modem LZMA headers all have valid dictionary sizes and a properties byte of 0x5D.
if result.description.lower().startswith(self.SIGNATURE) and "invalid uncompressed size" in result.description:
if "properties: 0x5D" in result.description and "invalid dictionary size" not in result.description:
result.valid = True
result.description = result.description.split("invalid uncompressed size")[0] + "missing uncompressed size"
| 42.308824 | 124 | 0.619395 |
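A standalone sketch of the byte-level patch the plugin applies (an illustration, not binwalk API): a standard LZMA header is 5 property bytes followed by an 8-byte uncompressed-size field, and the modified cable-modem format omits the size, so the plugin splices a placeholder back in after byte 5.

```python
def patch_modified_lzma(data, fake_size=b"\x00\x00\x00\x10\x00\x00\x00\x00"):
    """Insert an 8-byte uncompressed-size field after the 5 LZMA header bytes."""
    return data[:5] + fake_size + data[5:]
```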
| 794e6b1fbe3208ce353ab0f3565e274bc96a6dc2 | 1,231 | py | Python | about/models.py | D-GopalKrishna/RobotixWeb2021 | 3f99d41b2c4c99a3d1a214db1489f3e2fb1bfbb2 | ["Apache-2.0"] | null | null | null | about/models.py | D-GopalKrishna/RobotixWeb2021 | 3f99d41b2c4c99a3d1a214db1489f3e2fb1bfbb2 | ["Apache-2.0"] | 7 | 2020-02-12T02:54:35.000Z | 2022-03-12T00:06:26.000Z | about/models.py | D-GopalKrishna/RobotixWeb2021 | 3f99d41b2c4c99a3d1a214db1489f3e2fb1bfbb2 | ["Apache-2.0"] | 6 | 2020-02-10T16:37:38.000Z | 2021-01-28T13:39:46.000Z |
from django.db import models
# Create your models here.
class Convenor(models.Model):
photo = models.ImageField(upload_to='img/convenors')
name = models.CharField(max_length=50)
branch = models.CharField(max_length=50)
fb_id = models.URLField()
mail_id = models.EmailField()
phone = models.CharField(max_length=100)
def __str__(self):
return self.name
class HeadCoordinator(models.Model):
name = models.CharField(max_length=50)
branch = models.CharField(max_length=50)
fb_id = models.URLField()
mail_id = models.EmailField()
phone = models.CharField(max_length=100)
def __str__(self):
return self.name
class Manager(models.Model):
name = models.CharField(max_length=50)
branch = models.CharField(max_length=50)
fb_id = models.URLField()
mail_id = models.EmailField()
phone = models.CharField(max_length=100)
def __str__(self):
return self.name
class Coordinator(models.Model):
name = models.CharField(max_length=50)
branch = models.CharField(max_length=50)
fb_id = models.URLField()
mail_id = models.EmailField()
phone = models.CharField(max_length=100)
def __str__(self):
return self.name
| 27.355556 | 56 | 0.697807 |
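The four models above are structurally identical except for `Convenor`'s photo field; a hedged refactor sketch using an abstract base model (the base-class name is illustrative, not from the repo):

```python
from django.db import models


class TeamMember(models.Model):
    name = models.CharField(max_length=50)
    branch = models.CharField(max_length=50)
    fb_id = models.URLField()
    mail_id = models.EmailField()
    phone = models.CharField(max_length=100)

    class Meta:
        abstract = True  # no table for the base class itself

    def __str__(self):
        return self.name


class Convenor(TeamMember):
    photo = models.ImageField(upload_to='img/convenors')


class Coordinator(TeamMember):
    pass
```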
| 794e6c8c1251d8c60e3913d0cce7bdd87337479b | 1,148 | py | Python | report.py | kaushalvivek/train-escort-app | d87e8c0d6ffcfbc34e7a6c745ad675fdb72eba29 | ["MIT"] | 1 | 2020-06-07T05:19:34.000Z | 2020-06-07T05:19:34.000Z | report.py | kaushalvivek/train-escort-app | d87e8c0d6ffcfbc34e7a6c745ad675fdb72eba29 | ["MIT"] | 3 | 2020-05-28T09:35:15.000Z | 2020-05-28T09:37:24.000Z | report.py | kaushalvivek/train-escort-app | d87e8c0d6ffcfbc34e7a6c745ad675fdb72eba29 | ["MIT"] | null | null | null |
import sqlite3
import dateparser
from datetime import datetime, timedelta, date
from mail import send_mail
# path="/home/traincheck/mysite/"
path=""
conn = sqlite3.connect(path+'store.db')
c = conn.cursor()
now = datetime.now()
hours = timedelta(hours=24)
cutoff= now-hours
c.execute("SELECT * from Escort where datetime_created > '%s'" % cutoff)
unique = []
for i in c:
train_no = i[2]
origin_date = i[12]
uni = str(train_no)+origin_date
unique.append(uni)
segments = len(unique)
trains = len(set(unique))
conn.close()
# mail section
sender = {
'email': "vivekkaushalauto@gmail.com",
'password':"autoaccount",
'identity':"Train Escort Check",
'smtp':"smtp.gmail.com",
}
recipients = ['ascpsrb@gmail.com']
subject = 'Daily Report : '+str(now.date())
content = "Hello,<br/> Report generated on : "+str(now.date())+\
"<br/>Number of Segments Escorted : "+str(segments) +\
"<br/>Number of Trains Escorted : "+str(trains)+\
"<br/>Regards,<br/>Train Check Escort<br/>"+\
"<br/><a href='http://vivekkaushal.com'>vivekkaushal.com</a>"
send_mail(sender, recipients, subject, content)
print('mails sent on', now)
print ('done')
| 24.425532 | 72 | 0.686411 |
| 794e6cfd62304f9de7f7b4624f168563d5fc8453 | 2,353 | py | Python | tests/application/test_version.py | pallavigopi/esper-cli | 83c3536088031fd6a9e5e6e7ae8f18e3e82eeb78 | ["Apache-2.0"] | 7 | 2019-05-17T06:56:37.000Z | 2022-03-18T16:54:48.000Z | tests/application/test_version.py | pallavigopi/esper-cli | 83c3536088031fd6a9e5e6e7ae8f18e3e82eeb78 | ["Apache-2.0"] | 5 | 2019-07-29T17:55:33.000Z | 2022-01-19T02:01:45.000Z | tests/application/test_version.py | pallavigopi/esper-cli | 83c3536088031fd6a9e5e6e7ae8f18e3e82eeb78 | ["Apache-2.0"] | 9 | 2019-08-22T06:15:39.000Z | 2021-10-04T09:08:50.000Z |
from unittest import TestCase
from _pytest.monkeypatch import MonkeyPatch
from esper.main import EsperTest
from tests.utils import set_configure, teardown
class VersionTest(TestCase):
def setUp(self) -> None:
self.monkeypatch = MonkeyPatch()
set_configure(self.monkeypatch)
argv = ['app', 'upload', 'tests/application/Tiny Notepad Simple Small_v1.0_apkpure.com.apk']
with EsperTest(argv=argv) as app:
app.run()
data, output = app.last_rendered
self.application_id = data[0]["DETAILS"]
def tearDown(self) -> None:
argv = ['app', 'delete', self.application_id]
with EsperTest(argv=argv) as app:
app.run()
teardown()
def test_list_version(self):
argv = ['version', 'list', '--app', self.application_id]
with EsperTest(argv=argv) as app:
app.run()
data, output = app.last_rendered
assert len(data) == 1
def test_list_version_with_active_application(self):
argv = ['app', 'set-active', '--id', self.application_id]
with EsperTest(argv=argv) as app:
app.run()
argv = ['version', 'list']
with EsperTest(argv=argv) as app:
app.run()
data, output = app.last_rendered
assert len(data) == 1
def test_show_version(self):
argv = ['version', 'list', '--app', self.application_id,]
with EsperTest(argv=argv) as app:
app.run()
data, output = app.last_rendered
version_id = data[0]["ID"]
argv = ['version', 'show', '--app', self.application_id, version_id]
with EsperTest(argv=argv) as app:
app.run()
data, output = app.last_rendered
assert data[0]["DETAILS"] == version_id
def test_delete_version(self):
argv = ['version', 'list', '--app', self.application_id,]
with EsperTest(argv=argv) as app:
app.run()
data, output = app.last_rendered
version_id = data[0]["ID"]
argv = ['version', 'delete', '--app', self.application_id, version_id]
with EsperTest(argv=argv) as app:
app.run()
data, output = app.last_rendered
assert data == f"Version with id {version_id} deleted successfully"
| 31.373333 | 100 | 0.58351 |
| 794e6d2c92c89bd9bf6da821d62481cf8544a92d | 6,819 | py | Python | Scalable-Machine-Learning-with-Apache-Spark/ML Electives/MLE 03 - Logistic Regression Lab.py | databricks-academy/scalable-machine-learning-with-apache-spark | 2b560dea766e2e6589defaaf6d9d15f361ce6db6 | ["CC0-1.0"] | null | null | null | Scalable-Machine-Learning-with-Apache-Spark/ML Electives/MLE 03 - Logistic Regression Lab.py | databricks-academy/scalable-machine-learning-with-apache-spark | 2b560dea766e2e6589defaaf6d9d15f361ce6db6 | ["CC0-1.0"] | null | null | null | Scalable-Machine-Learning-with-Apache-Spark/ML Electives/MLE 03 - Logistic Regression Lab.py | databricks-academy/scalable-machine-learning-with-apache-spark | 2b560dea766e2e6589defaaf6d9d15f361ce6db6 | ["CC0-1.0"] | 8 | 2021-12-09T19:40:48.000Z | 2022-03-24T19:19:42.000Z |
# Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md # Classification: Logistic Regression
# MAGIC
# MAGIC Up until this point, we have only examined regression use cases. Now let's take a look at how to handle classification.
# MAGIC
# MAGIC For this lab, we will use the same Airbnb dataset, but instead of predicting price, we will predict whether a host in San Francisco is a <a href="https://www.airbnb.com/superhost" target="_blank">superhost</a> or not.
# MAGIC
# MAGIC ##  In this lesson you:<br>
# MAGIC - Build a Logistic Regression model
# MAGIC - Use various metrics to evaluate model performance
# COMMAND ----------
# MAGIC %run "../Includes/Classroom-Setup"
# COMMAND ----------
file_path = f"{datasets_dir}/airbnb/sf-listings/sf-listings-2019-03-06-clean.delta/"
airbnb_df = spark.read.format("delta").load(file_path)
# COMMAND ----------
# MAGIC %md ## Baseline Model
# MAGIC
# MAGIC Before we build any Machine Learning models, we want to build a baseline model to compare to. We are going to start by predicting if a host is a <a href="https://www.airbnb.com/superhost" target="_blank">superhost</a>.
# MAGIC
# MAGIC For our baseline model, we are going to predict that no one is a superhost and evaluate our accuracy. We will examine other metrics later as we build more complex models.
# MAGIC
# MAGIC 0. Convert our **`host_is_superhost`** column (t/f) into 1/0 and call the resulting column **`label`**. DROP the **`host_is_superhost`** afterwards.
# MAGIC 0. Add a column to the resulting DataFrame called **`prediction`** which contains the literal value **`0.0`**. We will make a constant prediction that no one is a superhost.
# MAGIC
# MAGIC After we finish these two steps, then we can evaluate the "model" accuracy.
# MAGIC
# MAGIC Some helpful functions:
# MAGIC * <a href="https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.functions.when.html#pyspark.sql.functions.when" target="_blank">when()</a>
# MAGIC * <a href="https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.DataFrame.withColumn.html?highlight=withcolumn#pyspark.sql.DataFrame.withColumn" target="_blank">withColumn()</a>
# MAGIC * <a href="https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.functions.lit.html?highlight=lit#pyspark.sql.functions.lit" target="_blank">lit()</a>
# COMMAND ----------
# TODO
from <FILL_IN>
label_df = airbnb_df.<FILL_IN>
pred_df = label_df.<FILL_IN> # Add a prediction column
# COMMAND ----------
# MAGIC %md ## Evaluate model
# MAGIC
# MAGIC For right now, let's use accuracy as our metric. This is available from <a href="https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.evaluation.MulticlassClassificationEvaluator.html?highlight=multiclassclassificationevaluator#pyspark.ml.evaluation.MulticlassClassificationEvaluator" target="_blank">MulticlassClassificationEvaluator</a>.
# COMMAND ----------
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
mc_evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
print(f"The accuracy is {100*mc_evaluator.evaluate(pred_df):.2f}%")
# COMMAND ----------
# MAGIC %md ## Train-Test Split
# MAGIC
# MAGIC Alright! Now we have built a baseline model. The next step is to split our data into a train-test split.
# COMMAND ----------
train_df, test_df = label_df.randomSplit([.8, .2], seed=42)
print(train_df.cache().count())
# COMMAND ----------
# MAGIC %md ## Visualize
# MAGIC
# MAGIC Let's look at the relationship between **`review_scores_rating`** and **`label`** in our training dataset.
# COMMAND ----------
display(train_df.select("review_scores_rating", "label"))
# COMMAND ----------
# MAGIC %md ## Logistic Regression
# MAGIC
# MAGIC Now build a <a href="https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.classification.LogisticRegression.html?highlight=logisticregression#pyspark.ml.classification.LogisticRegression" target="_blank">logistic regression model</a> using all of the features (HINT: use RFormula). Put the pre-processing step and the Logistic Regression Model into a Pipeline.
# COMMAND ----------
# TODO
from pyspark.ml import Pipeline
from pyspark.ml.feature import RFormula
from pyspark.ml.classification import LogisticRegression
r_formula = RFormula(<FILL_IN>)
lr = <FILL_IN>
pipeline = Pipeline(<FILL_IN>)
pipeline_model = pipeline.fit(<FILL_IN>)
pred_df = pipeline_model.transform(<FILL_IN>)
# COMMAND ----------
# MAGIC %md ## Evaluate
# MAGIC
# MAGIC What is AUROC useful for? Try adding additional evaluation metrics, like Area Under PR Curve.
# COMMAND ----------
# TODO
from pyspark.ml.evaluation import BinaryClassificationEvaluator, MulticlassClassificationEvaluator
mc_evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
print(f"The accuracy is {100*mc_evaluator.evaluate(pred_df):.2f}%")
bc_evaluator = BinaryClassificationEvaluator(metricName="areaUnderROC")
print(f"The area under the ROC curve: {bc_evaluator.evaluate(pred_df):.2f}")
# COMMAND ----------
# MAGIC %md ## Add Hyperparameter Tuning
# MAGIC
# MAGIC Try changing the hyperparameters of the logistic regression model using the cross-validator. By how much can you improve your metrics?
# COMMAND ----------
# TODO
from pyspark.ml.tuning import ParamGridBuilder
from pyspark.ml.tuning import CrossValidator
param_grid = <FILL_IN>
evaluator = <FILL_IN>
cv = <FILL_IN>
pipeline = <FILL_IN>
pipeline_model = <FILL_IN>
pred_df = <FILL_IN>
# COMMAND ----------
# MAGIC %md ## Evaluate again
# COMMAND ----------
mc_evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
print(f"The accuracy is {100*mc_evaluator.evaluate(pred_df):.2f}%")
bc_evaluator = BinaryClassificationEvaluator(metricName="areaUnderROC")
print(f"The area under the ROC curve: {bc_evaluator.evaluate(pred_df):.2f}")
# COMMAND ----------
# MAGIC %md ## Super Bonus
# MAGIC
# MAGIC Try using MLflow to track your experiments!
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 38.965714 | 388 | 0.733979 |
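A hedged sketch of one possible fill for the notebook's `<FILL_IN>` cells (column and variable names come from the notebook itself; this is not the official lab solution):

```python
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import RFormula
from pyspark.sql.functions import col, lit, when

# Baseline: binarize the label and predict a constant 0.0 for everyone
label_df = (airbnb_df
            .withColumn("label", when(col("host_is_superhost") == "t", 1.0).otherwise(0.0))
            .drop("host_is_superhost"))
pred_df = label_df.withColumn("prediction", lit(0.0))

# Logistic regression pipeline: RFormula builds features from all other columns
r_formula = RFormula(formula="label ~ .", featuresCol="features",
                     labelCol="label", handleInvalid="skip")
lr = LogisticRegression(labelCol="label", featuresCol="features")
pipeline = Pipeline(stages=[r_formula, lr])
pipeline_model = pipeline.fit(train_df)
pred_df = pipeline_model.transform(test_df)
```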
| 794e6db7ca37fa5d225540ae06865448db35ff2e | 443 | py | Python | game/migrations/0006_profile_num_antidotes.py | shintouki/augmented-pandemic | 94725f0390c4bf86ff613f7a1efa477324bbbefe | ["MIT"] | null | null | null | game/migrations/0006_profile_num_antidotes.py | shintouki/augmented-pandemic | 94725f0390c4bf86ff613f7a1efa477324bbbefe | ["MIT"] | 12 | 2016-12-28T13:16:03.000Z | 2016-12-28T13:16:03.000Z | game/migrations/0006_profile_num_antidotes.py | shintouki/augmented-pandemic | 94725f0390c4bf86ff613f7a1efa477324bbbefe | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-07 21:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game', '0005_safezone'),
]
operations = [
migrations.AddField(
model_name='profile',
name='num_antidotes',
field=models.IntegerField(default=0),
),
]
| 21.095238 | 49 | 0.609481 |
| 794e6f2bc13b79438204ca71e53c92258a2addc2 | 1,776 | py | Python | pyscript/ml/bayes.py | airy-ict/learn_python | 5a6c45c627208856bb04c2545fae8cba903519d3 | ["MIT"] | 1 | 2021-06-07T09:01:21.000Z | 2021-06-07T09:01:21.000Z | pyscript/ml/bayes.py | airy-ict/learn_python | 5a6c45c627208856bb04c2545fae8cba903519d3 | ["MIT"] | null | null | null | pyscript/ml/bayes.py | airy-ict/learn_python | 5a6c45c627208856bb04c2545fae8cba903519d3 | ["MIT"] | null | null | null |
import os

import numpy as np
import joblib
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.datasets import load_files
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import classification_report
def main():
"""
朴素贝叶斯实现
"""
# 加载数据
movies_reviews = load_files("data/tokens")
sp.save('data/movie_data.npy', movies_reviews.data)
sp.save('data/movie_target.npy', movies_reviews.target)
movie_data = sp.load('data/movie_data.npy')
movie_target = sp.load('data/movie_target.npy')
x = movie_data
y = movie_target
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
count_vec = TfidfVectorizer(
binary=False, decode_error='ignore', stop_words="english")
# 训练数据
x_train = count_vec.fit_transform(x_train)
x_test = count_vec.transform(x_test)
# 分类器
clf = MultinomialNB().fit(x_train, y_train)
# doc_pred = clf.predict(x_test)
# print("平均值:", np.mean(doc_pred == y_test))
# 可用 clf.score 代替以上均值
score = clf.score(x_test, y_test)
print("score:",score)
# 准确率 召回率
precision, recall, thresholds = precision_recall_curve(
y_test, clf.predict(x_test))
answer = clf.predict_proba(x_test)[:, 1]
report = answer > 0.5
print(classification_report(y_test, report, target_names=['net', 'pos']))
# 特征名称
# print(count_vec.get_feature_names())
# 保存模型
curr_path = os.getcwd()
model_path = curr_path + "\models\clf_bayes.model"
joblib.dump(clf, model_path, compress=0)
if __name__ == '__main__':
main()
| 28.190476
| 77
| 0.70045
|
794e6f9eb965b2b2e8e31988e4a96a9067079d46
| 7,341
|
py
|
Python
|
mvg_distributions/covariance_representations/covariance_chol.py
|
Liang813/tf_mvg
|
01bc681a8b3aac5dcf0837d481b963f4968eb777
|
[
"MIT"
] | 21
|
2019-04-04T07:46:54.000Z
|
2021-12-15T18:06:35.000Z
|
mvg_distributions/covariance_representations/covariance_chol.py
|
Liang813/tf_mvg
|
01bc681a8b3aac5dcf0837d481b963f4968eb777
|
[
"MIT"
] | 8
|
2019-03-01T10:08:30.000Z
|
2021-10-04T13:00:11.000Z
|
mvg_distributions/covariance_representations/covariance_chol.py
|
Liang813/tf_mvg
|
01bc681a8b3aac5dcf0837d481b963f4968eb777
|
[
"MIT"
] | 7
|
2019-12-18T23:41:44.000Z
|
2021-11-21T10:15:48.000Z
|
import tensorflow as tf
from mvg_distributions.covariance_representations.covariance_matrix import Covariance, DecompMethod
class _CovarianceCholeskyCommon(Covariance):
def __init__(self, inversion_method=None, **kwargs):
self._log_diag_chol_covariance = None
self._log_diag_chol_precision = None
if inversion_method is None:
inversion_method = DecompMethod.CHOLESKY
super(_CovarianceCholeskyCommon, self).__init__(inversion_method=inversion_method, **kwargs)
@property
def log_diag_chol_covariance(self):
if self._log_diag_chol_covariance is None:
self._log_diag_chol_covariance = self._build_log_diag_chol_covariance()
return self._log_diag_chol_covariance
@log_diag_chol_covariance.setter
def log_diag_chol_covariance(self, value):
self._log_diag_chol_covariance = value
def _build_log_diag_chol_covariance(self):
with tf.name_scope("DiagCholCovariance"):
diag_c = tf.matrix_diag_part(self.chol_covariance, name="diag_chol_covariance")
return tf.log(diag_c, name="log_diag_chol_covariance")
@property
def log_diag_chol_precision(self):
if self._log_diag_chol_precision is None:
self._log_diag_chol_precision = self._build_log_diag_chol_precision()
return self._log_diag_chol_precision
@log_diag_chol_precision.setter
def log_diag_chol_precision(self, value):
self._log_diag_chol_precision = value
def _build_log_diag_chol_precision(self):
with tf.name_scope("DiagCholPrecision"):
diag_c = tf.matrix_diag_part(self.chol_precision, name="diag_chol_precision")
return tf.log(diag_c, name="log_diag_chol_precision")
def _build_log_det_covariance_with_chol(self):
with tf.name_scope("DiagCholCovariance"):
if self._build_with_covariance:
return 2.0 * tf.reduce_sum(self.log_diag_chol_covariance, axis=1, name="log_det_covar")
else:
log_det = 2.0 * tf.reduce_sum(self.log_diag_chol_precision, axis=1)
return tf.negative(log_det, name="log_det_covar")
class CovarianceCholesky(_CovarianceCholeskyCommon):
def __init__(self, chol_covariance, **kwargs):
super(CovarianceCholesky, self).__init__(**kwargs)
tf.assert_rank(chol_covariance, 3, message="Size must be [batch dim, feature dim, feature dim]")
self._chol_covariance = chol_covariance
self.dtype = self._chol_covariance.dtype
if self._chol_covariance.shape.is_fully_defined():
self._matrix_shape = self._chol_covariance.shape
else:
self._matrix_shape = tf.shape(self._chol_covariance)
def _build_covariance(self):
with tf.name_scope("Covariance"):
return tf.matmul(self.chol_covariance, self.chol_covariance, transpose_b=True, name="covariance")
def _build_chol_covariance(self):
return self._chol_covariance
def _build_covariance_diag_part(self):
with tf.name_scope("covariance_diag_part"):
return tf.einsum('bij,bij->bi', self.chol_covariance, self.chol_covariance)
def x_precision_x(self, x, mean_batch=False, no_gradients=False):
"""
:param x: input, should be [batch dim, num_samples, num features], or [batch dim, num features]
:param mean_batch: if True do the mean over the batch
:param no_gradients: if True, do not back-propagate gradients on the Cholesky
:return:
"""
# , M = cholesky(covariance)
# x (M M^T)^-1 x^T = x (M^T)^-1 * M^-1 x^T -> M^-1 x^T -> M y^T = x^T
# Solve the M system for y^T and multiply by the solution by itself
if x.shape.ndims == 2:
x = tf.expand_dims(x, 2)
else:
x = tf.transpose(x, perm=[0, 2, 1])
# x should be [batch dim, num features, num_samples]
x.shape[0:2].assert_is_compatible_with(self.chol_covariance.shape[0:2])
if no_gradients:
chol_covariance = tf.stop_gradient(self.chol_covariance)
else:
chol_covariance = self.chol_covariance
# Compute x * Cholesky
x_chol_precision = tf.matrix_triangular_solve(chol_covariance, x)
# Compute matmul((x * Cholesky),(x * Cholesky)) and sum over samples
squared_error = tf.multiply(x_chol_precision, x_chol_precision)
squared_error = tf.reduce_sum(squared_error, axis=1) # Error per sample
if squared_error.shape[1].value == 1:
squared_error = tf.squeeze(squared_error, axis=1, name="x_precision_x") # Remove sample dim
if mean_batch:
squared_error = tf.reduce_mean(squared_error, name="mean_x_precision_x")
return squared_error
class PrecisionCholesky(_CovarianceCholeskyCommon):
def __init__(self, chol_precision, **kwargs):
super(PrecisionCholesky, self).__init__(**kwargs)
tf.assert_rank(chol_precision, 3, message="Size must be [batch dim, feature dim, feature dim]")
self._chol_precision = chol_precision
self._build_with_covariance = False
self.dtype = self._chol_precision.dtype
if self._chol_precision.shape.is_fully_defined():
self._matrix_shape = self._chol_precision.shape
else:
self._matrix_shape = tf.shape(self._chol_precision)
def _build_covariance(self):
return self._inverse_covariance_or_precision()
def _build_precision(self):
with tf.name_scope("Precision"):
return tf.matmul(self.chol_precision, self.chol_precision, transpose_b=True, name="precision")
def _build_chol_precision(self):
return self._chol_precision
def _build_precision_diag_part(self):
with tf.name_scope("precision_diag_part"):
return tf.einsum('bij,bij->bi', self._chol_precision, self._chol_precision)
def x_precision_x(self, x, mean_batch=False, no_gradients=False):
"""
:param x: input, should be [batch dim, num_samples, num features], or [batch dim, num features]
:param mean_batch: if True do the mean over the batch
:param no_gradients: if True, do not back-propagate gradients on the Cholesky
:return:
"""
# M = cholesky(covariance)
# x M M^T x^T = (x M) (M x)^T = y y^T
if x.shape.ndims == 2:
x = tf.expand_dims(x, 1)
# x should be [batch dim, num_samples, num features]
x.shape[0:3:2].assert_is_compatible_with(self.chol_covariance.shape[0:3:2])
if no_gradients:
chol_precision = tf.stop_gradient(self.chol_precision)
else:
chol_precision = self.chol_precision
# Compute x * Cholesky
x_chol_precision = tf.matmul(x, chol_precision)
# Compute matmul((x * Cholesky),(x * Cholesky)) and sum over samples
squared_error = tf.multiply(x_chol_precision, x_chol_precision)
squared_error = tf.reduce_sum(squared_error, axis=2) # Error per sample
if squared_error.shape[1].value == 1:
squared_error = tf.squeeze(squared_error, axis=1, name="x_precision_x") # Remove sample dim
if mean_batch:
squared_error = tf.reduce_mean(squared_error, name="mean_x_precision_x")
return squared_error
| 42.929825
| 109
| 0.680016
|
794e701bbd46b3cca35493e389c73a79e3cc5308
| 6,073
|
py
|
Python
|
YCgplearn/skutils/tests/test_utils.py
|
eggachecat/YCgplearn
|
098bda558f5bb986e9dab70e82394602aca6519c
|
[
"BSD-3-Clause"
] | null | null | null |
YCgplearn/skutils/tests/test_utils.py
|
eggachecat/YCgplearn
|
098bda558f5bb986e9dab70e82394602aca6519c
|
[
"BSD-3-Clause"
] | null | null | null |
YCgplearn/skutils/tests/test_utils.py
|
eggachecat/YCgplearn
|
098bda558f5bb986e9dab70e82394602aca6519c
|
[
"BSD-3-Clause"
] | null | null | null |
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from YCgplearn.skutils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest)
from YCgplearn.skutils import check_random_state
from YCgplearn.skutils import deprecated
from YCgplearn.skutils import resample
from YCgplearn.skutils import safe_mask
from YCgplearn.skutils import column_or_1d
from YCgplearn.skutils import safe_indexing
from YCgplearn.skutils import shuffle
from sklearn.utils.extmath import pinvh
from YCgplearn.skutils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
| 32.132275
| 80
| 0.629178
|
794e715b3bbca356c804c167be044f11a065b5f2
| 4,636
|
py
|
Python
|
final_yr_proj/tf_utils1.py
|
kauku123/Undergraduate_Fin_Proj_2018
|
e635d03c05785ca898c7a6bc48261de81318be26
|
[
"Apache-2.0"
] | null | null | null |
final_yr_proj/tf_utils1.py
|
kauku123/Undergraduate_Fin_Proj_2018
|
e635d03c05785ca898c7a6bc48261de81318be26
|
[
"Apache-2.0"
] | null | null | null |
final_yr_proj/tf_utils1.py
|
kauku123/Undergraduate_Fin_Proj_2018
|
e635d03c05785ca898c7a6bc48261de81318be26
|
[
"Apache-2.0"
] | null | null | null |
import h5py
import numpy as np
import tensorflow as tf
import math
def load_dataset():
train_dataset = h5py.File('datasets/train_signs.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('datasets/test_signs.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
mini_batch_size - size of the mini-batches, integer
seed -- this is only for the purpose of grading, so that you're "random minibatches are the same as ours.
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
m = X.shape[1] # number of training examples
mini_batches = []
np.random.seed(seed)
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation].reshape((Y.shape[0],m))
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
num_complete_minibatches = int(math.floor(m/mini_batch_size)) # number of mini batches of size mini_batch_size in your partitionning
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]
mini_batch_Y = shuffled_Y[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]
mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : m]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
def convert_to_one_hot(Y, C):
Y = np.eye(C)[Y.reshape(-1)].T
return Y
def predict(X, parameters):
W1 = tf.convert_to_tensor(parameters["W1"])
b1 = tf.convert_to_tensor(parameters["b1"])
W2 = tf.convert_to_tensor(parameters["W2"])
b2 = tf.convert_to_tensor(parameters["b2"])
W3 = tf.convert_to_tensor(parameters["W3"])
b3 = tf.convert_to_tensor(parameters["b3"])
params = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2,
"W3": W3,
"b3": b3}
x = tf.placeholder("float", [12288, 1])
z3 = forward_propagation_for_predict(x, params)
p = tf.argmax(z3)
sess = tf.Session()
prediction = sess.run(p, feed_dict = {x: X})
return prediction
def forward_propagation_for_predict(X, parameters):
"""
Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX
Arguments:
X -- input dataset placeholder, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
the shapes are given in initialize_parameters
Returns:
Z3 -- the output of the last LINEAR unit
"""
# Retrieve the parameters from the dictionary "parameters"
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
W3 = parameters['W3']
b3 = parameters['b3']
# Numpy Equivalents:
Z1 = tf.add(tf.matmul(W1, X), b1) # Z1 = np.dot(W1, X) + b1
A1 = tf.nn.relu(Z1) # A1 = relu(Z1)
Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, a1) + b2
A2 = tf.nn.relu(Z2) # A2 = relu(Z2)
Z3 = tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3,Z2) + b3
return Z3
| 38.31405
| 136
| 0.630716
|
794e717c501359fedebe6c1d19cdf62e0ee958b3
| 1,171
|
py
|
Python
|
modules/burpstate.py
|
bellma101/Osmedeus
|
3713c73edd75bda25423100e4de9c1ffd1cab116
|
[
"Apache-2.0"
] | null | null | null |
modules/burpstate.py
|
bellma101/Osmedeus
|
3713c73edd75bda25423100e4de9c1ffd1cab116
|
[
"Apache-2.0"
] | null | null | null |
modules/burpstate.py
|
bellma101/Osmedeus
|
3713c73edd75bda25423100e4de9c1ffd1cab116
|
[
"Apache-2.0"
] | null | null | null |
import os
from core import execute
from core import utils
class BurpState(object):
"""docstring for PortScan"""
def __init__(self, options):
utils.print_banner("Scanning through BurpState")
utils.make_directory(options['env']['WORKSPACE'] + '/burpstate/')
self.options = options
self.initial()
def initial(self):
self.linkfinder()
self.sqlmap()
self.sleuthql()
def linkfinder(self):
utils.print_good('Starting linkfinder')
cmd = '$PLUGINS_PATH/linkfinder.py -i $BURPSTATE -b -o cli | tee $WORKSPACE/burp-$TARGET-linkfinder.txt'
cmd = utils.replace_argument(self.options, cmd)
execute.run(cmd)
utils.print_info("Execute: {0} ".format(cmd))
def sqlmap(self):
utils.print_good('Starting sqlmap')
cmd = '$PLUGINS_PATH/sqlmap/sqlmap.py -l $BURPSTATE --batch $MORE'
cmd = utils.replace_argument(self.options, cmd)
execute.run(cmd)
utils.print_info("Execute: {0} ".format(cmd))
def sleuthql(self):
utils.print_good('Starting sleuthql')
cmd = 'python3 $PLUGINS_PATH/sleuthql/sleuthql.py -d $TARGET -f $BURPSTATE'
cmd = utils.replace_argument(self.options, cmd)
execute.run(cmd)
utils.print_info("Execute: {0} ".format(cmd))
| 31.648649
| 106
| 0.722459
|
794e72461d022da8bbe68d41a3cf2e55a06154eb
| 1,944
|
py
|
Python
|
fahnder/document.py
|
klorenz/fahnder
|
98b4a131b9a298da6b44e291be1cceb028dbb074
|
[
"MIT"
] | null | null | null |
fahnder/document.py
|
klorenz/fahnder
|
98b4a131b9a298da6b44e291be1cceb028dbb074
|
[
"MIT"
] | null | null | null |
fahnder/document.py
|
klorenz/fahnder
|
98b4a131b9a298da6b44e291be1cceb028dbb074
|
[
"MIT"
] | null | null | null |
from typing import Union, Any
from datetime import datetime
class Document(dict):
def __init__(self,
type: str,
url: str,
title: str = None,
excerpt: str = None,
published_at: Union[datetime, str] = None,
thumbnail_url: str = None,
mimetype: str = None,
content: str = None,
weight: float = 1.0,
fields: dict = None,
):
"""A Search result representation
Args:
type (str): Type of the document.
Can be one of 'page', 'news', 'video', 'image', 'audio'
url (str):
Url of the Document
title (str, optional):
Title of the document. Defaults to None.
excerpt (str, optional):
Excerpt to be displayed. Defaults to None.
published_at (Union, optional):
Date of last modification/publishing. Defaults to None.
thumbnail_url (str, optional):
Url to a thumbnail. Defaults to None.
mimetype (str, optional):
Mimetype. Defaults to None.
weight (float, optional):
Weight of this document in search results. Defaults to 1.
content (str, optional): Content of the document. Defaults to None.
This is usually not used in a search result, but if you return
this document as an answer, this is expected.
fields (dict, optional): extra fields
"""
assert type in ('page', 'image', 'video', 'news', 'audio', 'issue')
self['type'] = type
self['url'] = url
self['title'] = title
self['excerpt'] = excerpt
self['fields'] = fields
self['published_at'] = published_at
self['thumbnail_url'] = thumbnail_url
self['mimetype'] = mimetype
self['content'] = content
self['weight'] = weight
| 34.714286
| 79
| 0.543724
|
794e73180f4450765b53ebceb3c6ad63aa1fa0cb
| 50,148
|
py
|
Python
|
4. Sequences, Time Series and Prediction/Week 1/Exercise_1_Create_and_predict_synthetic_data_Question-FINAL.py
|
DhruvAwasthi/TensorFlowSpecialization
|
aeaa57eefd74f96f7389458662e050667eab7a54
|
[
"Apache-2.0"
] | 2
|
2020-06-09T08:01:00.000Z
|
2020-10-22T16:53:08.000Z
|
4. Sequences, Time Series and Prediction/Week 1/Exercise_1_Create_and_predict_synthetic_data_Question-FINAL.py
|
DhruvAwasthi/TensorFlowSpecialization
|
aeaa57eefd74f96f7389458662e050667eab7a54
|
[
"Apache-2.0"
] | null | null | null |
4. Sequences, Time Series and Prediction/Week 1/Exercise_1_Create_and_predict_synthetic_data_Question-FINAL.py
|
DhruvAwasthi/TensorFlowSpecialization
|
aeaa57eefd74f96f7389458662e050667eab7a54
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# ATTENTION: Please do not alter any of the provided code in the exercise. Only add your own code where indicated
# ATTENTION: Please do not add or remove any cells in the exercise. The grader will check specific cells based on the cell position.
# ATTENTION: Please use the provided epoch values when training.
import tensorflow as tf
print(tf.__version__)
# EXPECTED OUTPUT
# 2.0.0-beta1 (or later)
# In[2]:
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
def plot_series(time, series, format="-", start=0, end=None):
plt.plot(time[start:end], series[start:end], format)
plt.xlabel("Time")
plt.ylabel("Value")
plt.grid(True)
def trend(time, slope=0):
return slope * time
def seasonal_pattern(season_time):
"""Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.1,
np.cos(season_time * 7 * np.pi),
1 / np.exp(5 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
"""Repeats the same pattern at each period"""
season_time = ((time + phase) % period) / period
return amplitude * seasonal_pattern(season_time)
def noise(time, noise_level=1, seed=None):
rnd = np.random.RandomState(seed)
return rnd.randn(len(time)) * noise_level
time = np.arange(4 * 365 + 1, dtype="float32")
baseline = 10
series = trend(time, 0.1)
baseline = 10
amplitude = 40
slope = 0.01
noise_level = 2
# Create the series
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
# Update with noise
series += noise(time, noise_level, seed=42)
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
# EXPECTED OUTPUTdata:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAmYAAAFzCAYAAACU38U/AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOydd5wU5f3HP89su07n6BxIkyJVUFFcEXss0WjUxMQ0k/zSNEYliTEae4wxxhgjMcYeewdBBA4EBOFAeoc7OModHNfrluf3x+zszk7b2b3dndnb7/v14sXu7JTn5pl5nu/zrYxzDoIgCIIgCMJ6BKsbQBAEQRAEQYiQYEYQBEEQBGETSDAjCIIgCIKwCSSYEQRBEARB2AQSzAiCIAiCIGwCCWYEQRAEQRA2wWl1A5JB7969eUlJScqv09zcjPz8/JRfJ5Oge6IN3Rc1dE+0ofuihu6JNnRf1GTqPSkrKzvBOe+j9VuXEMxKSkqwfv36lF+ntLQUXq835dfJJOieaEP3RQ3dE23ovqihe6IN3Rc1mXpPGGMVer+RKZMgCIIgCMImkGBGEARBEARhE0gwIwiCIAiCsAkkmBEEQRAEQdgEEswIgiAIgiBsAglmBEEQBEEQNoEEM4IgCIIgCJtAghlBEARBEIRNIMGMIAiCIAjCJpBgRhAEQRAEYRNIMCMIgiAIgrAJJJgRBEEQBEEAWLX3BHZXNVraBhLMCIIgCIIgAPzyfxvx0hfllraBBDOCIAiCIAgA3OoGgAQzgiAIgiAIAADnHAzM0jaQYEYQBEEQBGETSDAjCIIgCIKAaMpk1irMSDAjCIIgCIKQsFguI8GMIAiCIAgCALgNvP9JMCMIgiAIgkDI+d9iWyYJZgRBEASh4A/vb8V3nv/S6mYQWYjT6gYQBEEQhN14eU0FAKDNF0COy2Fxa4h0YQNLJmnMCIIgCEKPMX9YiK2H661uBpFGKCqTIAiCIGzMpso6q5tApAsbqMxIMCMIgiAIGUfqWqO+2yFSj0gPHKDM/wRBEARhJ655ZnXUd06SWVaR1aZMxlh3xtjbjLGdjLEdjLEzGWM9GWOLGWN7Qv/3sLKNBEEQRHZxtL4t6nuQ5LKswQ5CuNUasycBLOScjwEwEcAOAHMBLOGcjwSwJPSdIAiCICzBDpM1kT6yNvM/Y6wbgFkA/gMAnPMOznkdgCsBvBja7UUAV1nTQoIgCIIgjVk2YYeuZlatBBhjkwDMA7AdorasDMCvABzmnHcP7cMA1ErfFcffAuAWACguLp76+uuvp7zNTU1NKCgoSPl1Mgm6J9rQfVFD90Qbui9qrL4nNy9sjvp+wxg3LipxWdSaCFbfFzuS7Htyy+JmzB7sxPVjPEk7pxbnnXdeGed8mtZvViaYdQKYAuAXnPO1jLEnoTBbcs45Y0xTcuScz4Mo2GHatGnc6/WmuLlAaWkp0nGdTILuiTZ0X9TQPdGG7osay+/JwvlRX4cPPwXeWcMtakwEy++LDUn2PXEsWYghQ4bA6z01aeeMFyt9zCoBVHLO14a+vw1RUKtijPUHgND/1Ra1jyAIgiDAbWHgItKBHfraMsGMc34MwCHG2OjQpvMhmjU/BPDd0LbvAvjAguYRBEEQWYpTiHb/Jh+z7MJq53+ra2X+AsCrjDE3gP0AvgdRWHyTMfYDABUArrOwfQRBEESW4XQw+GXSGAVlZg926GtLBTPO+VcAtJzfzk93WwiCIAgCAFyCgDYEw9+DdpitibTAActVZlbnMSMIgrAtze1+XPPMauyparS6KUQacTiiZ2bKY5ZdUEkmgiAIm/J2WSXKKmrx8Cc7rW4KkUaUPmYkl2URNuhrEswIgiA0KKuoxR8/3AYAaOnwW9waIp04heipkZz/swcOnt21MgmCIOyKvJB1S0fAwpYQ6capMGU+8dlui1pCWIHVUZkkmBEEQcSABLPsQmnKJLIHO5itSTAjCIKIQSsJZlmFYLUti7AUq7ufBDOCIAgN5INzq48Es2zC6omZsA4bKMxIMCMIgtBCPjeT83920S3X+oLlhDVwzildBkEQhN1p8wXx2tqDVjeDSCPnjOxtdRMIi7BaY0qCGUEQhAl+994Wq5tApAk7mLMIa7BD35NgRhAEoQGzetlMWIZWZF6QkpllDVa/+SSYEQRBEEQMAnbIo0CkHDt0MwlmBEEQGkwv6Rn1vV9RjkUtIdINh1pjGiCNWfZgsbacBDOCIAgNRhYXhD/PGtUH3BbeJ0S6UE7NfhLMsgYyZRIEQdicQo+TksxmExr2rECABLOuDreDHRMkmBEEQWgiH6MLc5xo8wetawyRVkRTZvQ2f5D6P1uwOu6HBDOCiMHzKw/gwIlmq5tBWEi+x4kOf5Ai87II5dxMPmZdH5sozEgwIwgjGtt8+NPH23Hds19Y3RQizch9ygo8TgBAm5/MmdmA1gS9eEdV+htCpBWp2ynzP0HYmLoWHwDgeGO7xS0hrCQsmPnInJUtKKMyf//eVotaQqQbMmUShI052dwR/uwL0KScTci1Jh6XOFSSOSs7oAjc7ISc/wkiA6htiQhmI3//CZXlyUKunDQATkEcKoM2GbiJ1MK59SkTCOuwuu9JMCMIA+pbfVHfqZB19sAB9C5w48nrJ8MRGikpl1X2YLU5i0g/dnm7STCLg60n/OigkPmswke5i7IWUTkmzs5CaJamqMzsQFKMFuY4rW0IkVakfrdaKCfBzCRlFbX4y/p2PL54l9VNIdKIls/B3upGC1pCWIE0QDsd4gfyMcsmGFbeNRs3n1VidUOINKMM/Eg3JJiZ5HBdKwCgsrbV4pYQ6URrGv50O4XNZweR3pc0ZmTKzA6kXu6W60K3XJelbSHSh12CPkgwM0lrhx8AkOtyWNwSwmoKc2igzhakdbNDCJkyyfk/K+Cch7WlF43rBwDoXeCxsEVEOrDL602CmUlaQnXy8twkmGUVGi9qEfmdZAXyQdrByJSZbUhC+dgBRZg+rCdONLXjmdJ9lraJSA/kY5YhSIJZLglmWQ9pTbMHaYCWNGYkmGUnzlD/P7pwp8UtIbIBEsxMsONoAx5bJDr957lIW5JNaPkc0NycHURpzEgwyzrkWhOng6bKbCKrSzIxxsoZY1sYY18xxtaHtvVkjC1mjO0J/d/DyjYCgMcZuU0fbjqMhjafwd5EV0KanH923il4/NqJAMjPKJuQBmhBEsyo77MCZTe7BEpqlg3Y5fW2wzLgPM75JM75tND3uQCWcM5HAlgS+m4p3fPc4c/7jjfjjrc2Wdgawgq+fcZQTBzcDQBF5mULcm2pZMqiPGbZAQeP0ppI6VKIro30zpOPmZorAbwY+vwigKssbAsAtbO3lDqD6PrIp2FKMppdcC7zMSPn/6wjypQp2HGqJFKF1WK41U8bB/ApY6yMMXZLaFsx5/xo6PMxAMXWNC2C0r/ALupOIn0wMPIzykKkAVqgvs8qaIzPTuzS71Z7sp/NOT/MGOsLYDFjLCrkhXPO
GWOatyokyN0CAMXFxSgtLU15YyUaG5vSej0709TUte/FrkOiP+EXX6yGVJ1p+44d6NW41/C4rn5fEiHT7snRY+1oaw+gtLQUe2rFqOwNX21CR2Vyo3Iz7b6kA6vvSXNLC44fbw23oaq6Lfyble2y+r7YkWTekza/OMjv378fpfxQUs6ZCJYKZpzzw6H/qxlj7wGYDqCKMdafc36UMdYfQLXOsfMAzAOAadOmca/Xm9K2/tF1APd9tB0AkF9QAK/3nJReL1MoLS1Fqu+9lRxeWwFs24qzzjpLdPpfvhQjRo2Gd/oQw+O6+n1JhEy7Jx8f34QDzTXwer0oOlgLrF2N8RMmwDu6b1Kvk2n3JR1YfU/yNixH374F8HqnAgBeO7geqBIrfljZLqvvix1J5j1pavcDny3CKacMh3fWKUk5ZyJYZspkjOUzxgqlzwAuBLAVwIcAvhva7bsAPrCmhdF8b+aw8Get+olE14aB/IyyDUowm71wHu38T92eHUhzu9XpMqzUmBUDeC9ULNQJ4DXO+ULG2DoAbzLGfgCgAsB1FraRyHLkk7NAZXmyFvIvzEJkczMtxrMLq6MyLRPMOOf7AUzU2F4D4Pz0t8g89I5mD+GuZqQ1yTbk6TKoVmZ2oezlIhOFzO9+fwvGD+iG62O4ORD2xS5vt9XO/xmJXSrQE+mDgcHhIMEs21CXZLKwMUT64NEpE3rlu3V3lXhlzUEAIMGM6DRWp8vISGheziJkGhLSmGUZcjN2qO/9QZLMspFR/QqtbgKRBuyiECfBLA6sTjpHWAdjMq2JXd5eIqVwRDRmTjJlZhVi30dG/GunDgIADOudb1GLiLQQer2ZxU5mJJjFwc3jRXW2vHYm0bWhzP/ZjRSdRabM7EM+NTPGcPnEAZa1hUgvVithyMcsDs4d5MKhQHccqWuLvTPRJZAUJAwRrQlNztmBPBJPoFqZWYVWFKZTYOTG0MWxi/84qX7ixO0U0EEzc9bBGAtPzi+vKbe2MUTaUJoyqYB9diA3Y0s4SDDr8oQX4lTEPLNwOwT4SDDLGrRWzieaOixoCZFutMzY5F+YvTgFRsEfWYLVpkwSzOLE5RDQ4aeXM9tQvqibDtVZ0g4ivUj97nGJQ2VdMwnl2QDn6ndeMKkx+/17WyghbYZil14jwSxO3E4SzLIJvRf1yqdXpbUdRPqRz61FOS5MGNgNS3dplu4luiDKyDxRYxZ76n517UEyeWc4FJWZYbgc5GOWTdjF54CwBvkAfdG4Ymw8WIdj9RT809XRcgJ3CAyBgDmBy29yP8Je2EXTSYJZnHhIY5aVWF3Ulkg/yiH64vH9AABLdlalvzFEWtEyZToFZtrHkHzRMhOpd61eiJNgFicuh4B2fxB1LeRrkg3YY/1EWAHnPGpyPqVPAfLcDuyrbrasTUQaUUVlCqZNlKv21mDt/poUNIpIB1Yvw0kwi5MTTe0AgNve+MrilhDpgMsTmRHZh6zfGWMY0D0XR+padXe/7Y2vcO2/VqehYUQq0VKM5bhEa4mZqPyfvFKGb85bk4KWEanEJpZMSjAbLydDUVmHDQZngiAyH60xekD3XByt13/339t4OHUNItKK0n2hZ6iQeV2LD30KPVY0iUgX5PyfWTS2+QEA+R6SabMJ6T199JoJ1jaESCvK4Tnf7UCrLxDzuEXbjqWmQYRlSILZSUqZ0mWhzP8ZyrgBRQCAAd1yLW4JkU6kCdouqm4iDWj0tdMhwGci4u7HL5eloEFEuuCcq5QmkmBW09xuQYuItGATzxUSzOLkzovHAACKi3IsbgmRDpSCGKUnyi6U+YxcDkaVP7IE5eTcu0A0X1Llj64PRWVmGG6ngB55LnQEYpsziMxHUm1LE/T0YT2sbA6RRrTMGi5BoBxVWYBWDw/sLlpJKmtbNI/pFdKoyVm99wRK5s7HoZPaxxD2wi5vNglmCUDZ/7MPaQE1om8hZo7ohalDSUDLBlS5rEhjljUotSb5Hid65rtRWasd/MEY0FcRFPBWWSUA4MsDJ1PSRiI1WJ23kgSzBCDBLHvQ8ikTmLmaeURmo9X3LodAglkWoOdL2rfQg+ON2j5mnIvPh+ZvyWoYkVLs4kNMglkCuKksU9YhXz07BIagXd5gIqUotSYuh7l6iURmw8E1tSYelwPtBotypyP6GPm35nY/fvJyGZX0sjER1xVr20GCWQK4nQ7SmGUJWlOwgzRmWYGW7O0kjVnWoDU5e5wC2nXSpXCIZZuU2wAxynP+lqNYuO0Y/vLpruQ2lEg6FJWZgbidguGqieg6RBL/R15VQSDBLBvQ0pq4BAZfgNum2DGRGvS61xNj7HcK0VOqJMS3dARw9/tbAYDGDhtjl9eaBLME8DjIxyzbiDJlMjJlZgtqU6Y4ZNLk2vXR1pjpmzI556pI3g0VtQCAN9YdCs8Z7208TAmIbQ6ZMjMQt5N8zLIFrZQJDtKYZQV6pkwAppLMEpmLXu96XALa/fqpkpTDwpGQP5nbGT3VUgJie2KXt5oEswTIczvQ2kF5zLIBzahMgVGi2SzFFXLu9gVjL8zmrdiX6uYQKUJ879VqkxynA+0+HY0ZEDZxK2tp5rkdqv0fXbgTQRpIbIXUf5QuIwMpynWhodVndTMIi3AwMmVlA1o9LJkyzSSZfWjBziS3iEgnmqZMl7GPmbSQG11cGLU916UWzJ4p3Yfd1Y2daiORIsiUmXkU5bjQECpmTmQH8kGanP+zB2VJJikdgtnIzDYTBc8JO6L9fovO/zpRmRzICQlgxUU5KMpxhn8TBO2ZnqpI2Au7uA6TYJYARblONLX74Sc/s6xBrtom5//sQDPBrCD5mJl792uaqa5ipqIlShk5/wPA6SU98MjVE3DflePw+HWTwtv15goaR+wJpcvIQIpyXACApnbSmnV1tNIikPN/9qBVkgkw7/x/QidLPGFvjNJldPiDmuMC5xyMMVw/fQgKPE7kuCLTq15SYvJVtidKTXm6sVwwY4w5GGMbGWMfh74PY4ytZYztZYy9wRhTV4a1mL5FomPnmv1U/6yrE85jpjBl0ko3G1D3cd/CHADAqr0nTJ2hhSbejIRD38cMgKk8ljkyvzK9gIEWMnXbCrsM65YLZgB+BWCH7PujAJ7gnI8AUAvgB5a0yoALxhYDAHYdI8fNbEE+RlPm/+yAc/XkPHNELzAGHKnTLmStxG8iepOwJ5olmZyisKUlmClHBLnDv55fGmnM7ElWmzIZY4MAXAbgudB3BmA2gLdDu7wI4CprWqeP9HIGaNDt8miWZCJTZtagFMwYY8h1OUz7mJFzd2aiV9lBMk/qlWWSPy9yU2abnsaMBDNboZW30gqcsXdJKX8DcCcAKba4F4A6zrnkvFUJYKDWgYyxWwDcAgDFxcUoLS1NbUsBNDU1ha/jYMD+8gqUlh5N+XXtjPyedEUOHBCdt1esWA4hNOoeOdyODp/f8O/u6vclETLtnpyoaUNjO1e1mfEADlQcQmlpteqYU3sK2HEyMglv3LQZ7JjxMJtp9yUdWH1POnw+HDlyGKWl0SbrA4fFNEnLV65Gn7x
ovYbf70dlZSVKS48DAI63RJ6DusYmzets2rYDvRv3mm6X1ffFjiTznlQ1i322c+cOlMbRL8nGMsGMMfY1ANWc8zLGmDfe4znn8wDMA4Bp06ZxrzfuU8RNaWkppOs4P/sEAwcPhtd7asqva2fk96Qrssm/B9i7G+ee64UjFPK+qnk72JGDhn93V78viZBp9+Sl8nUINrbD6z07anvuys/Qt38xvN4JqmPm7VkDnKwJf+8+8BSgTz68o/vqXifT7ks6sPqeuFZ8ioEDB8DrHR+1vXHTEWDLRkyedjpG9I3OVeZctgiDBw2G1zsWAHC8sR1Y8RkAgDk9ANpU1xk0dDi8555iul1W3xc7ksx7Un6iGfi8FGPHngrv5EFJOWciWKkxmwngCsbYpQByABQBeBJAd8aYM6Q1GwTgsIVt1MUpMATITNHlkVTbcouWIDAE7OIlSqQMPXOWy8HgM1kr908fbwcA7H/oUt1cVoT94FwvXYaoJdMzTcrJdcf2MSNTpr2wy6humY8Z5/y3nPNBnPMSANcDWMo5/xaAZQC+EdrtuwA+sKiJhjgEphsCTXQ9oqIyGaNSKlmCVmSeyyHo+phxDkwb2gPPfGtK1PaGNqoUkmlopUzwuMw7/+fI6mPqRXG2dFDKJTtCJZnU3AXg14yxvRB9zv5jcXs0cToEcgDPArSUJg5GGrNsQK+HXQ5mmMdMYAynDe4ete0fS/eiZO58mogzBD1tqaQx09OAyWU5qeA9oK8ZI42ZvdDr93RjC8GMc17KOf9a6PN+zvl0zvkIzvm1nHNbZmgkjVl2IPWwfPWc4xLAuf7gDADNPo4v9tXo/k5kBlrrZpdDQIeexiz0xLgUZsvnVh4AAFTWmkuzQViL3sgeEcw0NGYJTOqULsNeRMZ7S5thD8EsE3EKjNJlZCkFHtE1s7ldPzfR3StbccO/11CtxAxGb551OwXjcmwsWlsi57DJ/GeEPZGSxuoljI13PieNGaEFCWYJIjAGKpWZBWjMzgVSSS6dQvb3z9+O2nbxOD3NCmF/OKC5dBZ9zIy1Iw4dR/+jderIPMKGaCQXBiIas5+8UqZyZdF6Im6bM8rwMgu3HUNNky2NQlmJTSyZJJglitNBGrNsQTlAF3jEVbNerVS5ucps9B5hT7RNmUzflMkj+2hxspkm4UxBywFcXmapulEtZCvHil/NGYnvnDnU8Do/f21jYg0kUkbW18rMVMjHLDvQ6uECj3ER+/5FOeHPvgDH1sP1WLuf/M0yDf10GfpRmYAozDkF9dBa4HHiZDNFZ2YCeiN7vieSYcqlMFfraVtiTfFbD9ebbxiRYuwxp5NgliBOKsuTFWjlMyrIEQfnpnbtSbYoNzJ4bz9aj689tRLfnLcGy3ZV4+U1Fcb+SYSt0Fo4u43SZYT+19KY9ch3kcYsQ+Cca/Z9viw3mVbKnFialslDItG6V08ZiLH9i8LjCWE9knBtdcZBEswSxCEIpDHLEpSDreT836jjYyZfSX//hfXhz9/77zr84f2t+O+q8uQ3kkgbLocAn1//3WdMe4LumefGyRbSmGUKWpOzPKhDmTJHr86i/Fm4aFy/8GeP04FzRvbGyeaOsHZ20bZj+OpQXSdaTSQDisrMUEhjlh1oDbaFYY1ZYjmpDtQ0d6pNRPrQ9DFz6qfLMLKE9Mh3o7a5IyntIlKLmZFda/w3ms/7FnrgkM34LgdDj3w32v1BtIait3/8chmuenpVnK0lkoVdZnQSzBLEQYJZ1qAcbPPD6TK0BbNYTwVNzpmBns9QrkswTBSrlzW8Z74bWw7XY09VIy58YjleXlORjGYSKSKW1kQZ+6XrYxY6zy2zhked0yEw9MxzAwBqmmhMsBOU+T9DIY1ZdqA12Oa5HGBMP11GLOrInJUxaJkk8z1OtOjksNMzZwEIT8IXPLECu6ua8If3tyankUTSMZM2wa8Vla8xn8sneXkaFZdDQHE3MVCIEg/bA0qXkeGIUZnkxN3V4VCvnAWBocDtRKOexizGy200eRP2Qa+fCjxONHf4daM2pefl8Wsn4uNfnB3e3iPfnfQ2EqmBg+s68l8wthgAEFT5mGkjnYZzMf+lhENgGF1cCADYU91om3JA2Yz0zpOPWYZCpszsQUutXZDjTFhjRmQGWhG5gKgxC3KE/YKUx0hcM3UQxg/sFv7eLdeVglYSqUJvbr5q0kAA0EwwrjVWnDZIfAZG9SuEINOYOQWG4iIPinKc2HWsMSppMQlp1kJRmRkK5THLDvTGxwKPU9f5P5ZGzGr/BcI8RikT9Ppfb7UtZY2XQxOwPTHqFskcqVqY6xxz5aSBWPYbL84d1QfyghCMMTDGMLpfIXZXNUYJ+vd/vIOeDQuwyy0nwSxBzPqYHTrZkobWEKmCQ1tt4nEJmoWMia6D3iCdb1Ar1WhEcGsIZsN+uwD76qheoi3REbAlwUxpygT0hfJhvfPFY2U7SJ+G9spHZW1rVF3d51cdIF9UCwjnMSNTZmbiEISYgtk7ZZU458/LsK78ZJpaRaQLwzx2MeR1jaTwhE3R0m7GisrVOmZMv0JNjRkAbKwmwcxuGL3CUiozda3M2At1uSlT8jcrzHGisc0fJZgRVkNRmRmJU2Dwxyhk/Pme4wCA8hOUtypj0fEzcgks4Qz+FTUteKessnPtIlKO3kQrCVhmNaa7HrgYH//ibE2NGQC4aBS2H1zf5UASqLQWZrGmc7nzf5tfFMQKc1xoavfjlpfKovb1UXBZ2rFLYBYNCQmS53ag2SCXEQA0hUwdHlnhWyLz0FJrOx36PoYcgFsAltx+Ln5+3gjV75W1rbj9rU1JbiWREnRKMgHQLMuk5RfkcTrgdAiq2ooSTgGoqGnG08v2kl+RjdAzZxmZMmMhfwSq6sUi6IUhDeyuqsaofeXBAGv31+CFVQfivh6RGGTKzFCKcl2obzX2AZBU0xS9l7noDb1OQTDWmDHglD4FuuYrgBy/7Y5e97ic+oIZoD+ou3UFM4bvPP8lHlu0C2+trwyPKw/O345pD3wWX6OJpGCkOZH8xFSmTBOvs1xjVtsiJpXVqyIx85Gl4QCTb85bg3s/2h7+bXdVI9XcTQF2GZJJMEuQbrkuNLb5UVmr79zvDBUybmgjJ85MhXOuadJwOswFf7gMBDOK6rU/WjKWocbM4Fx6pkyHLFnxne9sxv+9Kpq0/v35AZxooqLnVqGnNAlrzDSLmBufUy6Y/fHycQCM54eTGhUB9h1vwoVPrMBfPt0NAPjqUB0WbTtmfGEiLqyOmyfBLEGkQfabz67R3Ufq3PpWHx7+ZAfeXH8oDS0jko2mKVNgUaYGOXJNmJ75CkBMH0XCWvR6R+rTDoNC5lroCWYBHm0W21vdFNd5ieRjKl2GyQSzWsdePK4fSkKRmt+YMiiOdnEcbxSF9Q0HawEAVz29Cj9+Odo/bfXeEyiroKCzRNFLLpwuSDBLkCN1YgmNw3X6pTQk5+CapnY8u3w/7nx7c1raRiQPvQHaKQiGlR+k19rt0H/BX11LtRJtDdcWyt1OcaOWCcpoQleatb8/cxgAUXMqV7
6QItV6tCp+SAh6ecwQO0ehpDGTC+IjQ9n/tegIREdqml3L3fjcWlzzzBfmdibCkCkzw/nhOcMBiCsfPSTB7EhdW1raRKQGraHWYeT8L9tspDF7YP4OdPiD5GtmY7QmWqlPfTpRmXqrbeWzcP6pfQEAb+32RT0D9DzYAz0hS9/HzES6jNApzQYO/HPZPtwlW9B3xMioUdvcAe9jy0ydm9CHTJkZyrDe+RjUIzec00iL9lA49BEDrRphb3TNWTHSpUgvdve8SBkeKWO8nJe+KMew3y7Awq3kI2I39BzAXQn6mDkVgplcgyZPvXGiqQMnm9W+RUT6MBKydDP/I7aP2YzhvTCgWw5+MXukqXa8u/Ew3pC5wHTIr6nRxGW7qlFeQ0nNE4XSZXQBPE4hLHxp0eYTB1sjcydhbzjX1oA4HfoJhuVbe+RFCle/839nqfb976pyAMBPXilT/UZYj9ZEaySYAfqrbUHxg1yDpsyJJvcz03z6Yr0AACAASURBVHIyJ1JPvOkyzPRSt1wXVv/2fEwc3D2hNilz0CojM/WeScIclPm/C+BxOsLClxaS0EalezIbrXdUdP6P3a/dQ4LZiL4FGNOvSPV7S4xceIR16ClNJCf+Di2NqYGmJc8drV0vzNHXtsuVa9uPNpBwlmaMM/9LGjP1b6mezz854AvPJ+2BIEb8/pOo3zWfSSJurBbM9EcGIiZivURjjVmvfDdqyCyRseipto3SZcjn5h75oikzVyfJcCuVYbE1ms7/sTRmOoN6t1wXPvv1LKzccwI7jzVieJ8C3evKHbe/9tRKvPT96Zg1qo/5hhOdgutU/AAiDvyqqMwkyER5bgdaDBzJlh7yo235PgCiP5mSDlICdAq7iLWkMesEHqeAdiONmS+A/t1zorbVt/iwYMvRVDeNSBKcQ3OEdgqCocZMmpz7Fubgoa9PwL9umgoA+PDnM3Hm8F7h/Yw0roS16KfLCEVlJjAJjuhbiJtnDsMj15wGALj/qvGmjmukJNXpR0fCNspj1llVy39vPj3mPlLes4Mn1b5kZMpMDrGia1MNCWadYF15Lb4sP4ndilIaEm3+ILrnuqO23fnOJvzfqxuw7zjlKspkBMbQ0OZHdaM64lapZbtxxhAM7J4LADhtUHfcPLMkHU0kkoDWAO0QGBiL3/lfi29OG4xh3WIPw3qpWb7YV4OD5OydVvSiMpOBMkBE4rppkVxnDsEoN2L0c/L8SirjFA92iYgmwawTSC/mjqMNqt/afAF0+IPoXRAtmC3aVgUA2H5EfQxhT7TWTpsq6wAAv3t3a9zncyq9wAlbojdIM8bgcgi6pXTi6V23U8A9Z+TE3E8vAviGf6/BLEqPkFSkftfN/O8wymOWGMt+48UL3ztdd2zoU+gJf25p19eeKn3M/vTxdnxKVQFME7575Pyf+fTMd6u2SdmZB/fM0zxG+p2wP1pRmc2hwTGgocmItegSrPYsJUxhlGQ0z+1AS7vaFyiRBbeZLOOSxswfCJK5ymIcGj5mndW0DOudD+/ovrq/9y2MCO97DCpDaD0bt7xMEd/xYvUIbZlgxhjLYYx9yRjbxBjbxhi7L7R9GGNsLWNsL2PsDcaYWuqxCVdMHABAu+ZhdQzBjAbXzEBvwJX8i3I1cpMBxi92KkwgRHrpnutCXat2jcNEyrncf+U4w98lTcg5f16G0+79NO7zE+aJlTJBsiQmkscsFpL/2BnDe0Zt11r8K+Gc6yY9joffv7dFZQKtbmzLilJhNrFkWqoxawcwm3M+EcAkABczxs4A8CiAJzjnIwDUAviBhW005JZZYvZ/rZdBKj48qEeu5rEHT7ZgP/mZ2R49rYkUsu5xagtmRiijuQh7YtRN3fLcqGtRR8UlmqDyDFlAiBaS79DR+jaK5E0Teg7gOaEI6zZZPyTrlR43oBtcDoZb54zCnFOLw9vNuD9874V1phf8bb6AbkaBV9cexJ8+3h617ayHl2LOX5ebOndmEzJjZ2utTC4iSSau0D8OYDaAt0PbXwRwlQXNM0Uk0aT6rZRekKIcl+o3QHz4Zz+eDQ965qP1il41WdSW6uWiMnqv7eJgSsRGb4DukedCXYuOxiyB6+g5fUtQwfv0EetO54fy0TVrmLI7G83XM9+NPQ9eijOG91L5MD5701RcXKI9nwBA6a7jpqN3x/xhIc551Lxvol75ua5K1poyAYAx5mCMfQWgGsBiAPsA1HHOpaerEsBAq9oXCylsXjM6K/QcKwsXKwkGuSqShrAPejLU7ReMhtspaPqLxRK8qLszA6Ne7J7rQq2WxizB+SuWpkMr0IBM4qkh7PxvkPk/xyWgOcXJofNkuQ85gIvG9cPIHsbzydYj9abPX01+zmEWbDmKkrnzUVlrjyo9liaY5ZwHAExijHUH8B6AMWaPZYzdAuAWACguLkZpaWlK2iinqakp6jrHW8TBcsu27ehevydq3+1HxZd23bp1hue86olF2Hw8gBcuzk9uY9OE8p50NQ4faYff59f8G/McHPsPVqK09HjU9srD7eCc696XLUe1B/SufB+BzHtWGhtaEWxlmm2ur2lHfbP6uWhqakWNrzmuv7OpqQm7Nq0HAJw/xIklB9XPx959+1HKKsPfS0tLoyLwMum+msHKZ0USeMvLD6C09LDmPm4WxO4DB1FaKkbZS+WZjI6Jl0uLORZuEz9v27YNeTW7AF8bjPQ55TruMfJ7KS8lZXSPtX6z43OWjGfl3xvEtEeL1oo3fPPmzQgeid9NJVnYIvM/57yOMbYMwJkAujPGnCGt2SAAmk8553wegHkAMG3aNO71elPeztLSUsivU9XQBqxYguEjR8E7Y2jUvg2bjgCbNmL69NOBlSt0z7n5uKgOT0f7U4HynnQ1PqvbAlfNMc2/sejLZejZuzu83slR25fWbwU7WqF7X2Z0BPDyjsVoDmX4dgoM/iDHzQub8cVvZ6N/t2i/xKqGNjy7fD9+d+mYKJNXuz+AH764HnMvGYNxA7p17g9NA5n2rDyxdSW657nh9U5X/ba8cRvWVVeq/p78TZ+jd/cceL2xE4VKSPflsjkc5TXNWKLh4jBoyFB4vaOBhfMBAP6+p+LM4T2BxWIgQCbdVzNY+az4AkHg008wrGQYvF7tYuM91i1Dt16Rdz8Q5MCiBRg2TP+YRFhQVYaF245h3Lhx8E7ojwMfLAGgzp0o0aGjeJXfy3s/3AagXLU9TOgZi/pNa5tNSMaz8krFeqC6CoMHDwYO7MfEiafhnJHWVdqwMiqzT0hTBsZYLoALAOwAsAzAN0K7fRfAB9a0MDZhHzPDSBiGiYO7Y2x/dZ1Ewv4YlWbJcQm6mfuNfBRy3Q78R5bhu1+3SCj8U0v34pnSfVH7/+7dLXh+1QGs2lcTtX3H0UZ8vucEfvfuFsO/gUgcPXOWx+kwyPyfmIeKILCowuZylKbMX76+UdO3lUgeRn6ieW5nlI9Zqv1Gpab0zev8lP3C6vJOn6OrIfW11IvZnPm/P4BljLHNANYBWMw5/xjAXQB+zRjbC6AXgP9Y2EZDIj5m6pdS/qJ+8LOZu
O2CUYbnKquoxSOf7ExuA4mkoDdA57gcmpFNZsZoeZRVnizlxmtrD+LRhTuj/IekKE5lCRjpDORqlBqMbqvbKSaYVU7Ine0Kp0P7YVM6/wcS8E19be1BlMydj1aDWoyEufe3wONAU7s6+CPZ0/n5p4q5zUYWi3VV812du8JXh+o0t7f7A3h5TYUt0zhxznHHW5uwrvxkzH13HmtAydz5UQncqxracKTOnO9YLP/CdBFTMGOMFTPG/sMY+yT0fSxjrNMpLDjnmznnkznnp3HOx3PO/xTavp9zPp1zPoJzfi3n3LYeitLqVi8DOCCTxGO87dc8sxr/Wr6PSjXZDKNe65nvxtF6bbNCrPfaIRPMtAqc3/XOZpTMnY9gkMsEMIVgFl7lkWSWCoy0pVJQT7uG1qwzg7pDJy2CPxCMEtbb/cGotBnvlFVizl+XIxDkKKuoxey/lGL/8SZc+6/V4dQ9f18i+sFqBS0QEbiJlAl9C3NQ1RCZmlL1Bl47bTC23HshRvQt7NR5pEXdVU+v0vz9g6+O4A/vb8Wv39xkeB4rIsrb/UG8VVaJbz+3FgCwpbIez32+X3PfhVuPhf6P1KOe8dASnPXIUlPXskvAvBmN2QsAFgEYEPq+G8CtqWpQJhFJlxF7lSH198RBxr5A51MKDRuiPUBPGtwdu6oaw1UAJMwISk5ZvTutJLVvl4mO3r5gMBz5qdSMSdvtMph0RfQmZ0kwM1qUJYJL9lyU3T0n/LkjwFXaWfkkevtbm7C3ugmtvgDmvrMZ+0804+73t2JdeS1e//IggMg4RSXBOs/gnnmorG1RRcamQtNSqJNyScmYfvrCm0+n1mr4Gh7R3fyjTUcM97NCO6+MlLz8HyvxwPwdmvtKAqgQ5zMu7c0V363CjGDWm3P+JoAgAISc8kkXDnF16xCYYboMpvjetyh2XTzCPhgJPQO65YJzoF4rA3yMN9sRZcrUj8HxBXh4kFFqzCTIlJkajARst6QxU/gYdlajINVhFBjQq8CDX54vOpKv3HtcZY4pq6hVHe8PBMNad+kZk54faZyi58UYM104sEcufAEe1kZatTh69Yczwp+/dlp/bL73Qs399PwRJUHG7DMRr6kzGOQJvRPbjzTg0MkWNLT5dBPbap1XcvtwxCEht/kC+HR7VeiccTc1JZgRzJoZY70QEiZD2fnNJ0vp4rgczNAJN7LiNi6MS9gVrl8v0SNqulQaMzM+ZjJfIr2yToAYWCLJcCp/Jq69nUgesUyZWhqzzrzjkjZL0ob+OuSbeuhkK+7/WFtLIKcjEAxPspLQyMDw7xX70RBKPuqPoT0hRIzmdknDpHz305ExXq7xlFeMcDoE3YTmPn8Qa/bXqLZf/o+VOHSyRVPgaunwY9mu6qhtWrnzlu6sQsnc+TisWDg0tPkw/HcLMP2hJSg/0Wz8Rym49O+f45w/L4vyh2QMONkcMcNrtUW+Sc+XsmTufNz6+sbw9yZZHwaVGhWLMCOY/RrAhwBOYYytAvASgF+ktFUZhMshaEZnKVfbseqvEfZFr8vCGcA1BoBY3SzXfmn5mEnsP9Gsa8qUzBMkl6UGo/sqacz0IzMTQ9JyDZSVcps2tAcAoLwm9uQmXyRKps9lu6rx4IKIUEeJaTuPtJiS/PzS6ef5f95TAADPfGsKHALDgFBUt5GJuqHNh+vnrVFt33akAef8eZnmAuOnr2zA9/67DtWNET9arQoUr64RTeXbDkfra2pDQtTxxnZ8+z9rY/1ZmmyujJyzzRfElPsXh7/L21xR04zr532BhpD14vHFu3HqPQt1F63vfxUx2crfB2nRYvuoTM75BgDnAjgLwI8BjOOcb051wzKFxjY/XlhdrhsxInVvXmiF1avAk6aWEcnAaHKWoinfWHcw+hgT55UPBnkGGrNrnlktE8zUkXni9WiiTRVG6TIAaEbldmbx5XE68OT1k/D6LWeEtz333WkAgOqG2HFQ/kAwfH0pMEEpPC7bWR3TlyibiShN9DsyV6NeZrq47YJRKH/kMlwyoT8AYM5YsaamkWB2wV/1c2kC2ibK5bvFxNny5+f++duxaNsxXPevL7C3ugk/faUMS3aKWjWXosqNPDjFbKkoJT96ab1+m/2Rce+xRbuwZv9JvLo2eizWCs5R8smWSKDAK2sOGuyZPsxEZX4HwI0ApgKYAuCG0DZCxord0dnflRP6rJG98dDXJ+Duy07FF7+dHTZREPaGc/2JNj8kbP/vy0MavxrPzvKEsEamTCByfbmio6apHUtDAyIpQFKDocYsFPjz1cHo9APJ0F5eOWlgVJJhqWi2meLlvkAw/ORJQoNyvr73o+34xf82gjDGSMAOa8w60q+1VppLpTqWRvVWYwWpGGl+5VrYt8sq8eOXy/Bl+Uk8unAnPglFQQJqf8tmDRPhc5/vx7mPadfo7PAH8fIX5aY1uiea27Fq7wmU1wd0fd9aDFLD3P3+FlTUNOPej7arfrPasmUm8788hXUOgPMBbIBo0iRC6PkWSJsZY7hxxhAAorP3z88bgb8u3q15TDDIsamyDs3tAZw9sndK2kuYR2/lLAlmSswO0t1DhbCNTJlAxN9Insfsin+sCvt06AUFEMlAu+8lU+bcd7fg+ulDFEckd1T3OAUwpv9cDeudjwMhH56GNn/Exyw02WrVczVi25F63PHWZrz5kzNRoPOMd2XMaKBzdYRlKyb0G6cPwRvrDmH2mL4Jn+P+j9XCicT8zdraVWVeRaW/XXQCXvF/KZryZHMHeua7o/b/z8oDeHThTtMRlT98cX34uderhvCoIjeo3LT5ypqD+PrkQZrHJdtFIV7MmDJ/Ifv3I4has4LUNy2zUEaBxJorjR6+Jz7bja//c3XCdnkieZiJzNPCzAAtpcwwOg8Q0XjIV4VRjrYkl6UEo9uqZ35OhVmZMaY7npw9oneUhuHqf64Of5Y0ZvEK7o98shPbjzZgvYmEnl0Zo1dY0mK2pLiQuRnGD+yGfQ9digHdc1W/rbjjPJxjYnFvpKT6y6faCoSA4rlqUglmke+SQFSYIwr6U+5fjF3HGqP2l6Lb61o0otw1OGAioOCN9RFrRmtHAMcaogW42mbtnH6akfZpJJHM/80AhiW7IZmOnpxltHqWfAIGKl6oz/ecSFq7iM6jJ2T172Tqk1mhAbNnnttwP0nj4dcZPUkuSx16fW+UWyqdWpM8t0PX9BP2MYs3xUFoEpXn2ssmzMixkmCu9DGz2mlczqs/nIEhvfLQtzA1KZpKd0W776gEM5kZUXpE5Xfn6WV7seFgbfgeSvNhqoJTTr1nIS5/amXUNqWgJlFncRJmMz5mHzHGPgz9+xjALgDvpb5pmYVSA2bm0ZIisEb0jVZAUskU+2A0SAsCw9WTB2JQD+VK1dzA8vA1E7D4tlkYZZAYEoiYydt9Afz6ja+waNuxqN/JlJkajNKQSCt/9TGpao02+R6nrgO6JJjJnaSVNLf7cehkS9Q2KfIuS+WySJJRIx8zyZQZGqvt9ApK7T41VJ+5wGPsKqHEpVMWLBb1rT6cbO7AxX9bgfmbj0Zp
E6UxSm763XmsAVf/czX+smgXAHW+vVRwoila4Dpar12qaewAa2tbm3Eg+Ivssx9ABee8MkXtyVj0/DiMXm6XQ0C7P6gyZZlx8iXSA4exScPlEDQHEjNDm8fpwMjiQt3BQeKdDeLrtnpfDT7dXoXNirB0adBraPOhrSNASYyTiF4/ygUzznmUj2kqNWZnDO+JNfsjJsYcl6A7XkiaB72Jrr7Vh288sxp7qptQ/shl4e3S8xSvb1pXwzAqM5wuI/re2uGWuQSxjmuOS79smBHTh/XEqr3qnGexONncgXc3VGLnsUb87LUNuOviMeHfJMFVHkiwu0pMhHyiqR2H61rDJcPSGemqV1Jv6tCeaWuDFmZ8zJbL/q0ioUwbpSnTTNJPKcmoMszZKJKESD9GSSOdDqbK7RPv6rmPyRQqUnbqfIV/k3S9i59YgekPLYnv4oQhuhG5smoNcsEoVYqTOy4ajfuvHIchPfOitjsFfcFMQm9innjfp9hTLU6OVQ1t4TFLEugenL8Dq/ep3Sou+/vnmPHQZ3H/DZmCmbFbSjDcagMfMyUv/WA6rpw0IKzVmxrKg2eWRAXyk80dUYKXvFKFkVa/uSOAH70YSYvR5kuf4/27Gw6n7VrxoCuYMcYaGWMNGv8aGWMNesdlK8qH2cwA3SsUlaI81orcOIQ2scZoPY1ZPDgdArbdd5Hp/V2KsHipjUd0Vn9EYsQyY0tc8uTnUb+lws/oZ+eNwE1nluC+K8bjX9+eGt5e1+qL+Yxq5VpTMuOhJfj+C+sw6U+fhrVAWw7X48Z/qwOQth1piCrg3eYL4J4PtqbML4dzHhUBuGrvCaxMsh/u7W9uwlvro9PeGMknjDHkuhyWJJiNxRnDe+HJ6yeHF5TfmKodeaiHcnwxy8nmjqhoxoMyE3mQc5XJXGLx9ipsPxoRKdIx/33nzKG6v739kzNTfv1Y6PYA57yQc16k8a+Qc26tAdaG6EVZGr3cl4USBCpplq3C7vlgKwlqNkarJFci/iby1Bv3fG2s4b5K07eyxM6boQmmzRfAt59bix1HaR2VKEZClhTuX1GjPeGkgly3AxeP7xf+3iMvdoFrsxr4ZbuOo67Fh2odh2gA+NXr6vxnH3x1GC99UYHHQr5CyeYX/9uI4b9bEP7+refWJj1i/Z0NlbjjbTFvutnXN9ftUKfLSGqrkgNjTJWaAgC8o/vAO7qPanuOS8DqubPxhizJMSBqbY2oa+kI1w4FgMpauWAGnPNn7fxlStrSkKqie67+e6MV3ZpuTIvGjLG+jLEh0r9UNioTUcllJt7uW+eMwqs/nIHbLhgZfajs2Je+qMD7G+2pbs0GYq2EXQ5Bs/ZgZwbob50xBEN75en+7lA8bEpT6p1vb8aEexdhQ0UtVu49gfs+2taJ1mQvsfpeKo0TdUyavcB/d+mpMfeJ1zXCKIrzg6/UOa2khYneX77/eBNK5s7XrY4ip7UjoLqHH28+qrN3ajDbhbkuhyUJZhNhya/PDefRlOAcmnnqcpwODOieqxJQclwOXDyun2r/L39/Pq6eMhCtvgB2HossAvcdj68+pkSyzMPFRfouIkZaQWeCwQ/JxExU5hWMsT0ADgBYDqAcwCcpblfGofdiGvknCQLDzBG9MaJvIcofuQy9dXyNGtp8hvlyJty7CA9/ErvAMZEABpn/AdEM6QvwqMmks2YNj9OBd356lu7vygzbWmk0Gtv8aAuZsHJiJLAl9DHqe11fnDSM6x/+fCaevWkqclwObP/TRZhzqnZy0cIEEsQqE4UCosD53kZt92IeDhbQPt/qfaIj+XsxFpiH61px6j0L8cqaCs3flYWwlS4EHf5gUlMtxCpInut2qNNlWD+na9Ij342hCv/EU/sX4f4rx6v29bi08yu6HAxP3ThZtX/fwhwUeJyoamjHuvJalBgsKj0xcjYCwGc7qmPuY4ZHrj5N9zel3+Vnvz43/Nllg3BkMy24H8AZAHZzzodBzPyvroaa5SgHhGT6HDy0YCfG3rMoalt9qw/Pfb4fnHM0tvnx7PL9SbseEY3RYOt2aOcY6+wA3bvAg/JHLsPIvupczkqfIb+OhkOqT5fjJMEsEWJpQbRW1ulSnJw2qDsuCmkv8txOPHvTNM39Eln9K2WbvyzahWG/XYDb3thkuL/AGFo7AiiZOx/vlEWEuEMhk1YsbeLBkEn4Ix0NmfcvpVHfT4aSg7Z0+FHd2IZRd3+Cq59ZrXGkPvUtPnVUdBwaM2nBbHOFmYrBPXPxmwtHoYfMxPn7kPZVqgOr1Co5BUFX0yRf/BlVq3nkmgkJtzlejEySPoWVQ56yypEJGjMAPs55DQCBMSZwzpcB0B4Fshi95J/xdbH51/ueD7bigfk78MX++MOaCfPE6hGpPp189Z6oWePP15yG608fHLVN6RsyvaSnKmpJ6eMmIU1cUtg8ET9GArbSpBw+JkVtMUKvLXluY43Z3745Kea5/7Fsr+Hv8vQa1Y2if9rfluxGQ5sPDy/YEV40alj88cv/bUTJ3PkAIvmzzGq9pBxi33x2DaY/KEYjbzpUB845nindh4qa2Ka0OU8sx5kPL9X8LVY/yp3/I8dYP6nrIS8dNmVID1VtTUmQGVksCilKM6ck5E8a3F117hyZJsyjWAjKg1X0rEKpwEg7d/NZJbq/ZYrGrI4xVgDgcwCvMsaehJj9nwBwxcQBAICAYtRJZHKO5xhp0tWblInkwDk3Vb0hGf1w3emD8cg10er3KYpQ937dcrCrKrqUiV5CSOkZUQ6UhDli9aiyDJupg1LI8zdPw9xLIrmjeuW70c3AyRkATh/W+XxN4azuLGLePXSyFX/9dDeeXRHR5CtL+ADAh5siPmuScFlWUYuH1hrn9gMQNtVvUeT1a2j149GFO3HuY6Uxz3G8sV21zay1I8ftkCWYtf843C3Xhb9eNxGAttB56YR+eO2HM3BjSIDTMmUCwIvfn47BPaO1UR6ZxmycIjnriL754c+nl6Q2P9hrP5oR/qysZSwtUG+dM9JQQNRb5KQTo3QZTzPGzgZwJYAWALcCWAhgH4DL09M8+yOtOHU1ZnH0caxXWx4yLq1SrX+Euj6GpkynhsYside+/YJR4c+Lb5uluQrUi2IijVnniCWU60diW/NWzh5TjPNGR3zNFt46C91jRG12tkh5MMjDlSgExqLelbKK2uh9DYSXNl8g6r3ZXSs+0x9t0i6gLR6j/dwrzVRbD9fjRFM79lY3ora5A6W7jH2YuEzQNMLjFFS+Snb1MZOI/G2Rhv7r21Pwx8vHgjGGs0b01n1+pRJd3XJdWHzbuVG/SeNS7wI3LhkfnW1gaC9RMLtxxhBTPmad4axTImbUnvnuKL/HklA7xvQrjMod2r9bdELuRCsfJBOjt3I3gMcA9AfwJoD/cc5fTEurMghBYBCYlo9Z/MRadXUEgsgRHKF9xW1Ujie1xDRlhgYrZWRksl5tublhZHGhZjLRQJDj4QU7VKk7alskwYw0ZgkTZ0da/TZKk8qAbjnoU+hJqWC2/3gTXl17EF8eEKMtleOgPF0CEL2wVNL
Q6lO9Q4CYs0wPvTRCyuCFrynqIwLApj9eiCqNtCBvrDuIC8eKvnuxut7tFMIRrFb3u1nC5aZk2y4er522SYlcYMlxOfDaD2eEtUvSGDOmX5HKr9HlEPDVPRegwONMeNHiHd0HFTUtpgqXA8DZI3rDITD0LvCgOqQV/eE5w9GvKAczR/SKasdbirxlVi2s5BjlMXuSc34mgHMB1AB4njG2kzF2D2NslN5x2YhTEFQas/DKJI6RPdbLLdfKSAKZlj9GRU1z2JGW6DzGJZnU9d2SLSu//IPpePYm0U9DmUto4qBuAIBnV+xXmVPrW32hNpLGLBFidqNeJHayGxIHkgZXMht2y434KJ4xXG1G6ozZZvbjy/GflQfC3wXGosbB5vZowam+1RcW4pTUtvhUUZZtvgBeX3dIc39Av6LBfR9tj9n2xjYfLnxihWr7Xe9sMS1kuZOQXDrdXDK+H2aP6YvbY+Qkk5A/H8qi9meN6I0Zw3sBiGjMnA6mqmQDAN3z3CqfNjNMHdoDW+69EC98bzqW/cYbzrsmN9kr2f3AJXjx+9MBAH1lKTPy3Q6cPVKtERzUQz+K1CrMlGSq4Jw/yjmfDOAGAF8HQLkZZDgEphsZl0zkWZWlyV9Lg3LuY6WY9Zi5ZH6EMWYy/wOpLbx7zsg+4Qi8ob3y0bdQHGzuu2IcvjFtsO5xTaGoTBu4TGQsRrdOyxfJal8j6XmU5CNJY3bD9CGY9x0xZkvyD7pwbHFSr80Yi1ooKvOhLdt1HNc9Vav9AwAAIABJREFU+wVaOvx4etlevLb2YLi8WF1Lh+odulxD0yVHT2O2dGfEVKn3XhqV/fk0ZJqNpTlxO4TwmJwphot8jxPP33w6BppMolokqwlrFOEr9UXvAk9SNU6XTuiPwpyI1ve570zDzvsvxmmhBakWbqcQFihHFReGt2eS5SCmHpsx5gRwCYDrIabKKAVwb0pblWE4BabWmIUG7bh8zGK83FUN7fC4HCjwOMP7Ul3N1MJhPEBLE6G8/1NdnkVqjkNghqYo6dlIZmu2VNbj8n+sxOLbZmGkbNCT+OpQHQ6ebAkHxRhRVlGLnvluDOudH3NfOT96aT2umDgAl5u4RqeIceP0cxcmvylmkZzvJbOhlOHc7WAoynHh09tmobgoB03t/nBJuJe+Px3fef7LJFzb3AKlodUfrhLQI8+F5o4AWn0BlcZXquOph5mKKHr7tBqMm3Pf3QIgds4tt1OIWiwD9jCDJZOiXBdqW2Jr3iWBfMqQ+Opyyrll1nCU9MpH7wI3PttRhTfXV6q0b06HAKfDfEDTuAHdwvUwe2hUP7AruqM6Y+wCiBqySwF8CeB1ALdwzikiU4HDwXRDvON5TWOtti/9++dwCgx7H7o0PPm3aCSDtILqxjYcqWvTDKXOdIz6UFpFqgfo1LVHmnydJgWzp5buxc9nj0hKdObHW0Rn7MU7qjQFs6ueXgUApgSza0I5p8ofuSyuNizeXoXF26tSL5jBeKJNYi7TpCGZMgf1EDUiyslU0iDIozVnjVKX5UkEgTG8Z6IodJNszJI0G0aCkhz5e/ar17/C3hjCm97Cdcex2GXKYkW0ukLJpQFkjpNZnBTJtFVG09ONM4aiMMeFr08eGLV9ye3n6hyhZnpJT8wJaXElradegI3kQuJ2CnjwLA/uWKEdxVsY0vi5HCyj5iYjjdlvAbwG4HbOea3BflmPkY9ZsvEHORrbfOFJocUmdTQvemIFalt8cU+ydieWsOzW0JilepCWhiqHwMIDjxbymqsHa1rCgtSuY40Y3U8tVMmpb/WhsrYF4wZEmwwcCo1MVybWX6hpykxNU0zTLdeFZ741JSlpMABRoyVpTGLxVtmhqOLmWpYEINo5vyYUOdzqC5jydxt/b3Si7aeWGudYu+CvyzW33xmqi2lELMFMU2MW86yZxR0XjQ5rU5XRrnLcTgHXaBRLP6WPOkF2PGimpEFE4J4xrCf65OmnVpHMl1JAh5w/f+M09C6wpxbNyPl/Nuf8ORLKYuNyMPj0Cq92Il2GntahoqYl7PzfonCwNZNUMRWYHbwzEsOSTGrn/xiHdL45ksbMwaL8L4DoHELyCVByBl+6swoX/W0Fnli8WzOHk8Q3n/0Cl/1d7eMjTZ4Z5vOcMIY+ZjZ0/geASyb0D+dpygv5cClzOukhadp65LnwwvdOx4/PVdcD1UMulAHAngcv0dxP/lzKXTK0ojKVKAWhWDS0xWdRkEceFpkRzAJBcM5T7r5gFbNG9cH5Y8QULLpzXJKQLzIlS4AUWa5k6tAeuOmMoXjsG2Jeth460ce5IcFMy6R93bTBmD0muX6WyYLCtZJAntuhUpkn9JoqDvr7DZOx98FL8K9vT4naXl7THNGYya577b9Wm0qqSJgnVj9qOf+neoiWgqMcgqDSmMlND3KfHcnULgljTy7Zg/MUJW7k7DzWqLldMqNKgt5N/1mL2974ynTbNxysRW2z9mBrhnQ618e6ltavdnMCv3rKINw6ZyR+dt4IU/tL/TvvO9PgHd23U38PY0xzwmzWMC/e/f5WbK6sS/xiSWDioG5R70wsYdYdXpRFjuliLmYAIrUz9XJ1JoNrpgzCdJmWd1RIo19Zq60NczkE3H/VePQL5SBbeddsbPrjhar9pByOWkFydoYEsyRQ4HGiUcfXq7PpMpwOISqrMiAW85VWL62+yHXXlZNyM+lws+ky0jcjS8+UU8OU2axT7N4X4HhowQ7sqYr45DS1+1V1N+taOvDL/20Mf1cKJ5LGTDJlfr7nRMzi1PJzXf3P1bjh39Gldtv9AZz3l1Isi5H4MxjkMR3Ck4kY+GG0g15SafvMzm6ngFvnjIopZPTKd+PcUX3CEbzSguPSCWoTUDzcOGOIaptWkXQAWLFHP2dZOuinSDQqRT/rIfnzKaNPuxr3XjEO158+GOef2jf2zglyzdSBUe/NFRMH4MYZQ/Dz2eYWFPkep6bpWdKYmRHMrpw0IKwxthoSzJJAQY5TPdgkcel8QmFyOnCiBY3toulQylWlB+cc+46nbzLrihhNtJEEs/JUJumLylROuNuOaDs1f7GvBvNW7MdzsrxTADD67oXo8Aex/3gT2nwBzFuxP6pMTpAD7288jJK581Hb3BExZSbwN0qyq1IbV93QjgMnmvH7UDScHs+t3K+Ze0rJm+sOYU+VtsZPzr0fbkPJ3PlYY1Bv1kjE0lIgZKpJq+wPF+DF708PO1tL0XBDe+VH1To0yz+/JWr5r52qTufSpCOY9UljHUUt+neLTMqLb5sVU5gNa8v9QdtpSpNJ38IcPHLNaXEHDykz6iv50TnDwp/diiCVHJcDD319gum0HnpIPmZmgkuevH4yVt41u1PXSxaWCWaMscGMsWWMse2MsW2MsV+FtvdkjC1mjO0J/Z94/G2ayHc7wzmjlMSXLkP77b54fPSqtbqxLXy9BVuOGZ7zvY2Hcf7jy/H5nuPmGyKjZO58/LPU2MFWjtV5nJJNrIlWL49ZKpUmkp9NvtupirrTKyfy6MKduueraW7H7MeX4863N6v+2kCQ47
+rywEA+080h01dz5Tuw06DyDatKGV56ih5AmTpkTlS36Z6ft4uq8SEexchEOSqEj963PnOZlxgQoB7IfR3XT9vjebvsR5l3ZJMMa9sXwSZ/6KE38DpW0L53A0OJe0s0UiF0qKj1f2yXDv5rBZ/vW5izIk/XiSN2Sl98jUjjpVoacwyue+Tybb7LsKy33gN97n9wtGYMFAMLkok+awZpMLsyc7Zl2qs1Jj5IUZ8jgVwBoCfMcbGApgLYAnnfCSAJaHvtqYgx6laBSZUkklne2GOC3dfdmr4++6qRtPO9psrxQK/chOWWSRz1Z8X7jJ/TNeSy8ATMGWm+hZIavmiXPWKXi9tixE1TaLP1+p9NaoSX0HOw9qTIOdRv7+rSI1Q0xTR7GrlszrYENkmT4DcJjOn7jvehFF3f4LHFomC5B8/2IrGNj9aOvy6g/fhulY89/l+zd86i5G29NpQFJrkYA/Yz8csXiJRt5FtymfqrovVWdeVLhtGyUg/NKh/aZY+hR4crVeXVOoM3tF9cPdlp+Ktn5xlan9Jy9PhD2aonjR15HucMRO6yqNwUyXQ9sx346t7LsCtczKrWJFlghnn/CjnfEPocyPEagIDIRZNl2pyvgjgKmtaaJ4Cj1Pl2xMpyWQe6Zi/fXOSynwgyCYIKfppQBwrRr2BY/W+EzjRpB2dl4i5yszqOtMw0n5FEsymLypTCvgoylH7VCgLCJvhxZDmSGBQPSgVNS3hydof4FECl/xv9AeCmPrAZ+HvWoLZw19qT6TyiCmBMXT4g3h62b6ofYJBwKWjofr+f9fhgfk7cLS+NalpPGJpS3NcDtwwfQjy3AoBOYPVJtKzLhfAlf6Tbo3Eq0o/K62yPBJbD8fOIRaLHnn6aQ5i+YYpOb2kB175wQyM6VeEH54zHD1NJiLV1JjZyL/Q7uilwkg23fPcutptu5J4BdskwhgrATAZwFoAxZzzo6GfjgHQ1EEyxm4BcAsAFBcXo7S0NOXtbGpq0rzO8WMdaG7zRf22p0LUaK1avRpFbnMPxeXDBby5K4DC2t1wCAylpRHz0/4KtYas2OOD0dqztLQUhw+LQteO3XvwvU27cc1INwpk7bl5YTOK8xgenaWuF9YhG5D17q/ynpQuXwGPwWo506g+3obm5qDu33+iVRyUt2zbgR71osm3qqoNwaD+MZ1FMmVu2fAlyhXP1hXFdRg62YOnNuqnwlDyVlklAMDv60DFwYNRv130txUY0V2cgL7csBF7ayOT0MFDkTqGC5dE54tavmIlCtwMFQ0BuAWG/gX6a8A168rCn1eviWSgLy0tRSAgCm2ln6/EiePR0ZzS/a2qE82id760At8dF5lU3/lkKXrlitdtaOcocEcvcOQs/GwZGACPM/J7W2sbqqqqDPux6lg72tv94X1aW9tQdcz4GCV644oV9HW2YyeA7ZvKcGKPeO8K/RzTih1YXyX2xYF9sV0bytavw+H8UP1EAUh2poXtm8o0t4/qIeBkW+yo324ehvp2cXwb6m6C//BWlJqLYQmz66i4GF+z9ksUht7DPXv2oLSjPL4TxYGdnpXOsmLFcjQ2ilGXZRvKULsvsQTYXemeSFgumDHGCgC8A+BWznmDfMXBOeeMMc1lK+d8HoB5ADBt2jTu9XpT3tbS0lJoXefLtp3gFfujfitfdQDYsR1nz5xpegXm9QJ/1vmtck0FsGNr1LaJIwZjY3W5wfm8KG3YBlSU46s6D/ZWN2HQwIG4/8LxAEKmyoULUNXC4fV6EQxyvLC6HNedPhgFnpB5dvGi8Lm0CN+ThfMBAGfNPFuVWyuTef1QGZrQDK93lubv1Q1twPIlOGXkKHhnDAUAvHt0Iw7UH9W9Z50mdK8vPv9cUWMX+u4QGObMPg9zADy1cX7cp83NycGgQf2A8ugAgWOtAoAgThk9Fk2H6oD94u9DhgwGDogmxJ8vbYk6Zuzk0zGwRy5uvnshgFB2/4XabTp1/GnAWlEga+k2FIC4IPF6vXAuWwQE/Dh9xhlY07wHOBwRBqX7K3y+GGjvwOeH/bj3+pnAp6KQ2FA0DNfMHIZj9W044+El+PUFo/DL80dib3WT2G9YGz7XTz5rgdspYPcDkdxbnrVL0a+4J7zeSbr3rLRhG9ZXV4bbkrNmKfr16wWvd6LuMapz6IwrVnDm2QFsP9KAyYrSOhfNEf1NAWD06FGqsUjJWWecgSG9xMXekgnNcaXxueOi0eGSTct+48WD83fgsx1VAICx/YvQp9CDqy+ahjtXfBJ1nENg+PSuSzDrz8uA1hbVeSV65rvRt9CD+lAQyrjRI+GdOUx3fz3ath4FNm3AlKnTRC3d0s8watRIeM8siftcZrHTs5IofVZ9huON7fB6vSjauhJoqMfUKVMxMcHM/F3hniixVDBjjLkgCmWvcs7fDW2uYoz155wfZYz1B2AcQ28DnA4x8z/nXKXKTpbuSMs0EE/WYknLIj+NMpPzsl3V+NPH27HveBMe/PoEBBJIAdHVLJmxzFlOWWRW5JjUcs2UQXhnQ2XYjLrijvPQEQhgWO9Ilu0LxhZj8faquM57uK5VFbUJRKLoNlTUYfW+SEoDI0v37MeXY6Ks0LCRiVFeUPqhBdFBClIwgC8Q1H225BGx8iSlko/LkXpxVb5kZzUuHt9PN7JTM3lpjBfYIbAu5VfpcTpUQpkSyQQ1qrgAu3V8V+Wm0KG9IgEAbocQM73Ez84bgSG+Q5h+xpkoLsrBUzdMxoMLtmPS4B64ctIA3ZqNUrukIfiTX52D3gUe1Ld2YM5fI30u+itG3EByEyxu7QhFZAeCmRqLaw0f/fzscEmsW2YNx89f24iSXvHVy+3qWCaYMVGC+Q+AHZzzv8p++hDAdwE8Evr/AwuaFxfOcDZ0HnZ6TfaLqmUjHxhHzpWDJ8UVpNyBWunU29Ammkul1B+J+It1NR8zzmP5mIX8rxT3MpXG3Me+cRoeunp8+LukmZAz76apGPbbBUm97vOrooW2WFnYN4UCTwDg5hfW6e6nF6X37obK8H31BYK65cfkt17epsbQ8ywJdwKDqXQbb6w7iNNLeppy5HcITO1f2HUs+ZpI413/brm6gpmef2qOK7ZgBgCFbobiIlF4ynU78MBVE2IeIyVelm6/U2DoU+hBH4XPWZsvGOXfJCVQjRfpPsjf/S7e9UmhX7eccATs104bgK+dlvqat5mGlVGZMwHcBGA2Y+yr0L9LIQpkFzDG9gCYE/puaxwaL6hEsgbp4Yqw8//efDqG9Iy9yvhIEQElj5ZSOvX6/Dy0T2QlGC/SgPzW+kN4a/2hGHtnPtLqPZ1JJgWBxcwppNTcTh6S/AK+8fzNK3brp2v51evalQN+/eYmtIcErQ4/R6uOACcXjOQJc6sa2nHvh9vwy/+J5zfrbHzXO1tw8ZOfA4idIFpgrMtpifUY2D0XU4f2iFp8fvKrc/DUDZPD+1w1SZxku+uUM5IHDvzlWrW59/YLEo+ei2jMxP+NHL7lEYF6foexEMIL8q6dx4xIP1ZGZa7knDPO+Wmc80mhfws45zWc8/M55yM55
3M45+aT21iEpDWRCzLJflGnlfTEoltnhRPundKnQJWUT8nJ5o5wkWCJpTuqw+YopeAlTbQurcLcJpHOecfbm3GHiULBdifWHQivmuXpMmw4Sr/3fzNx4OFLk3rOeOsWdgZfIIjmdh2NmawZ8ja9tf4QXlhdjsN1oilzvYk8aNLzK50n1pztcjD4gsFwn9ux75PFqrmz8c5PzwrXMWzrCODU/kW4XFbT94GvT8CXvz8fvXSSxcoXFBMGdsOEgd1w/1UR7e8vzh9puj2Lbo32+xQUpkyjvjBTMD0WWu9+l1eXEmmBMv8nAcnXQKsIbzwlmWIxul9huGREjkuAy2l87in3L1Zt21PdhBv/LTo9y31zHl24Ewu3islqpRpwmw7FX7vOSMv25vpDKKuwvZytwigE3iEwMKaRHsIG4/MHP5sZ9Z0xhn/cOFln7/hJt2CmZ8qM1phFPsdbwBoQk+1KSAKdER6nAM6zy5wlBfe0yMrB/fkbp2FY73zkux3oW6hO4zM0ZG6Xa+ydDoaPfnE2bjpjaELt6FcUfR2pLJ4koOkNRd+fOQyPXxsJ6Eg0xYW8CgZ5mRHJxPKozK5AxNcg9Q7g//zWFKzeV4O+RTm69TnNIp9MnimN5I2STJk/fXVD3Oc0EszuDGnQyh+5LGr7exsrseNoI3536alah0VR19KB8poWTIozgqeytgU9893qnFMxiKUAYYzBJQhpTTBrlomDu2PBL8+J0g587bQB2HK4Hs8ujy8ha698t0r7qldaJxV0BIK6pkz5I9feSWFx+oNLor7HmrIl01y7PwiXQ7BN36cSqT5ri6zMzXXTBuO6aerySxJv/fhMbD/agD9+uC28zSV0Ti+g5xsm9Zneu3vP5WMBiHVAF2w5hkSVZ3LfYuW1CaIzkMYsCTg1TJlhkvym9irwhE0HsUyZRjzw8XZNDR8A3agnMyTil3bbG5swb4U5QeHGf6/FVU+vivsaZz+6DN99/svYO6rgMbvQ5WDqkkwJXCkVjB1QhNH9osvL/PaS2AKwkismqR10y080J9yuePEFeJQgAACLtqnLkSmLsqcayTQn1x52dWuWVCy6zUT9QYm+RTnwjo4ugq2VqDYe5OPfw1dPwNs/ORMAMCpUTklekUGLuy4eg3NG9sZ5oxMrzh3lW5wNEjmRNkgwSwLSyskXTK+fUWcEqOdWHtCNoHR3IkFsvIJZrCLsSrYfFcOsE7m/68rN1VpUEmuidTqEKLNwVxykR/ZV1w7cn07BzB9UFSL+8ctlqm3JNq/GCnCIaMzEdnRhF7MwUnFvPdOyWQpyOmewkZzvzxnZGzdMH4JpJT0BiGbVl38wHYN7qqOV5QztlY+XfzAjZrFyPZwhjd8rX1SEt3V1oZxIDySYJYFwPhstH7MUvqh6BavNcs8H22LvFCfxlHHyBYKYeN+nCV0nHvmvM0KymUNdDiFKKAfsozFLhNljojUI35tZgm+ePhg/OfcUfOfMoVhy+7nIl2kjChOc2AAxmagZjtS3qkypgNqPsrOmTCV6AQcSHkkwk+ViS6ZfqR0pCglUP0ggKav82cpLMH+YnC9+Oxv//s60qG35Huf/t3fnUW5UZ9rAn1dL73a73e3d7b0dr2BM4wVDaGMCBmPMFmICATKEbVgmA98kLJOQBEg8mZmE4UzOMJ7ABJKAww5hGfYmIdhmX43BbWywDcbGC7i9dLda9/ujqqSSVGptJZWk+/zO8bFUklpXt0pVr+7yXhzZMsjx+Y9fcWTO72mxWsyeXbutHH+LkYcYmLkgmsuqsPPmgzl2BbzY8YXj9u5eFZkIkKlk3aNO4rv/7PcfeH1zn4sdZ9Iyl20C0Mfe/gzPrt2W3sy8mASzpX2anjehKXJ7ZEM1rl88FX6f4OrjJ+FnS6Zh/KA6DDYHXlf4fZg0LLE1LZn47qt0f1z85BHnHxH7e/LbYrY3xTi6SFemeeyW+r5PR8Dvw8ZlizKaQWm5zjaO1I31C4fVV6dcLNtu3CD3Epk6LdRe7kE5FQYDMxc45THLZhHzTNnHWHx79ijX/m6oN4zfPJ96PTwAuP/DbrxkC/B2OrRqJH2fuIjJfpG98p63cMXdb8T8vVc3Rmd0hjNoBUs3YF73+R6MufqxyGzUS+8yJj+kOtkGzZUfYpTo+fmfF02O6cpO1l1uHXuVcYHWOXNH48iWJqeXGM+P+3vppmSJf9oPFjq3tL2y0Z1ZvyceZCwGnyz5raXCqcWsRPd9IQSSHE+nzhyB681B+fnkRpoMS7b5z4hSYWDmAsd8NqZsp2Knw37RvOnkadi4bFEkz1kuQmGVMIj61hfW48V1X2D69U/i9P96CYDRmvDnj3rw7d9G1xw8J4MB9vH1tcchvcHMG57G5l3GqgWvfxIdI5ZOYNbTa+SXSrd17QUzCerDb/a1NHyigF9ixiKVwjijK5Mk8vzekeNwxmHNaBlsLO+U7EJmBSSVQV/M5104bSgaaqJLhZ10cOykAX9cK0MmLawAIjm0mhucxw89kWVLLwA88PeHR24PqDEGuKeaeVqp4RizfPjVGTPw3Sy6RjOVbpLhdNiXyeN+JzcxMHNBwLZmmqUQXRr2i6ZEcvfk/r7dveGEpVaWPbEWZ9+2Gnu6QpFEnV90djm9PMaYqx/D//5tg+NaiaG4rsx5y57DyvU7Ep639csDAGJbJZIFWz9++F3c88om7OjsQst1T+COlzbGPLevIK3S7BI5EBeUpjqXV8QP/kfxN5hdsaAlIW2JpTLgx7WLjC4npzVagWg3VFcoHHvMKaDRtobrv58Rm909/u8l5H9LoaHWCJgaM1gn9pkrnRegt5s3oREzbWtEWmv3fbwj+WLYQDQw02lWZilzo/vU4vSjhfue3MDAzAVWK0D8ouBAYS7Qlx89IXLbukgu/86hWf+9j3ekN9suWWC2K64788bH3necFBA/YB4AXuxIXLrn9FtXAogd2J2sd/LOlR/jB/e/jV8/8yEA4OG3Po0Jxi7+w2vOLwRQZV5kD2Q42yzgl9g8ZiX063n6iHqMH1SLSUP7xQRNVstCsq5Mq7t3z4FQzE8QBcSkRYh/ffzFLH5ZsFT6m8lNfSJYeljyvFl2TUmy0Fu+1dqMP35vTsy2CWaLYf8kSwtZ7HnMgLKckOu6Z648Ck/9Y+pgudjZx5jpMLaQCocJZl3glGiwUBfn+FaPb88ajV8/82FkkdhMVQZ8kQXPU9m+x3k82SFxM+V6wyqmy+rRtz/FmMZabN+TGNj1NZ7L3r2aavbnH1Z9AsDIUm4fx/T0ms+xrzuE825/BTecPC0mx5f1rPjALFVwHfD5ijaPWSqPXDYPYWWU116jVgCVzZicoyYOwjNXHuU4EH9ofTU+/yq633vDYVQFfehfFcQ2h+MhnpXctCsUxrLTDsLUEfX40UPv9vmabNLKVAR8uOuC2SmHBliD/2O7/ktl73vDCnpLnWOLmQfloPLDwMwFgb6WZCrwN/WKBRNw6fzx2PrVgci2Db84AWOveTyt19dWBrBpZ+qlaMZe8xguOWp82uV66M0tkduX3fVG0udt2rUv
# Chart as in the screencast. First should have 5 distinctive 'peaks'
# Now that we have the time series, let's split it so we can start forecasting
# In[8]:
split_time = 1100
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
plt.figure(figsize=(10, 6))
plot_series(time_train, x_train)
plt.show()
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plt.show()
# EXPECTED OUTPUT
# Chart WITH 4 PEAKS between 50 and 65 and 3 troughs between -12 and 0
# Chart with 2 Peaks, first at slightly above 60, last at a little more than that, should also have a single trough at about 0
# # Naive Forecast
# In[9]:
naive_forecast = series[split_time - 1: -1]
# In[10]:
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, naive_forecast)
# Expected output: Chart similar to above, but with forecast overlay
# Let's zoom in on the start of the validation period:
# In[11]:
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid, start=0, end=150)
plot_series(time_valid, naive_forecast, start=1, end=151)
# EXPECTED - Chart with X-Axis from 1100-1250 and Y Axes with series value and projections. Projections should be time stepped 1 unit 'after' series
# Now let's compute the mean squared error and the mean absolute error between the forecasts and the actual values in the validation period:
# In[12]:
print(keras.metrics.mean_squared_error(x_valid, naive_forecast).numpy())
print(keras.metrics.mean_absolute_error(x_valid, naive_forecast).numpy())
# Expected Output
# 19.578304
# 2.6011968
# That's our baseline, now let's try a moving average:
# In[13]:
def moving_average_forecast(series, window_size):
"""Forecasts the mean of the last few values.
If window_size=1, then this is equivalent to naive forecast"""
forecast = []
for time in range(len(series) - window_size):
forecast.append(series[time:time + window_size].mean())
return np.array(forecast)
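# The loop above is easy to read but O(n * window_size). A vectorized sketch of
# the same windowed mean (not part of the original assignment; produces the same
# values as the loop, up to floating-point rounding) uses one cumulative sum:
def moving_average_forecast_fast(series, window_size):
    """Same output as moving_average_forecast, computed with a single cumsum pass."""
    cumsum = np.cumsum(np.insert(series, 0, 0.0))  # cumsum[i] == sum(series[:i])
    return (cumsum[window_size:-1] - cumsum[:-window_size - 1]) / window_size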
# In[14]:
moving_avg = moving_average_forecast(series, 30)[split_time - 30:]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, moving_avg)
# EXPECTED OUTPUT
# Chart with time series from 1100->1450+ on X
# Time series plotted
# Moving average plotted over it
# In[15]:
print(keras.metrics.mean_squared_error(x_valid, moving_avg).numpy())
print(keras.metrics.mean_absolute_error(x_valid, moving_avg).numpy())
# EXPECTED OUTPUT
# 65.786224
# 4.3040023
# In[17]:
diff_series = (series[365:] - series[:-365])
diff_time = time[365:]
plt.figure(figsize=(10, 6))
plot_series(diff_time, diff_series)
plt.show()
# EXPECTED OUTPUT: Chart with diffs
# Great, the trend and seasonality seem to be gone, so now we can use the moving average:
# In[18]:
diff_moving_avg = moving_average_forecast(diff_series, 50)[split_time - 365 - 50:]
plt.figure(figsize=(10, 6))
plot_series(time_valid, diff_series[split_time - 365:])
plot_series(time_valid, diff_moving_avg)
plt.show()
# Expected output. Diff chart from 1100->1450 +
# Overlaid with moving average
# Now let's bring back the trend and seasonality by adding the past values from t - 365:
# In[19]:
diff_moving_avg_plus_past = series[split_time - 365:-365] + diff_moving_avg
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, diff_moving_avg_plus_past)
plt.show()
# Expected output: Chart from 1100->1450+ on X. Same chart as earlier for time series, but projection overlaid looks close in value to it
# In[20]:
print(keras.metrics.mean_squared_error(x_valid, diff_moving_avg_plus_past).numpy())
print(keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_past).numpy())
# EXPECTED OUTPUT
# 8.498155
# 2.327179
# Better than naive forecast, good. However, the forecasts look a bit too random because we're just adding past values, which were noisy. Let's use a moving average on past values to remove some of the noise:
# In[21]:
diff_moving_avg_plus_smooth_past = moving_average_forecast(series[split_time - 370:-360], 10) + diff_moving_avg
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, diff_moving_avg_plus_smooth_past)
plt.show()
# EXPECTED OUTPUT:
# Similar chart to above, but the overlaid projections are much smoother
# In[23]:
print(keras.metrics.mean_squared_error(x_valid, diff_moving_avg_plus_smooth_past).numpy())
print(keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_smooth_past).numpy())
# EXPECTED OUTPUT
# 12.527958
# 2.2034433
# In[ ]:
# Now click the 'Submit Assignment' button above.
# Once that is complete, please run the following two cells to save your work and close the notebook
# In[ ]:
get_ipython().run_cell_magic('javascript', '', '<!-- Save the notebook -->\nIPython.notebook.save_checkpoint();')
# In[ ]:
get_ipython().run_cell_magic('javascript', '', 'IPython.notebook.session.delete();\nwindow.onbeforeunload = null\nsetTimeout(function() { window.close(); }, 1000);')
| 184.367647
| 43,275
| 0.937425
|
794e732617dd74f7767b97faaed5979567149da5
| 5,670
|
py
|
Python
|
practicer/gui/pyside/widgets/exercise_details.py
|
DominikPott/practicer
|
1e0f10d3cc9ec17ead067708e3334223fbeb72ea
|
[
"MIT"
] | 1
|
2021-10-01T09:15:08.000Z
|
2021-10-01T09:15:08.000Z
|
practicer/gui/pyside/widgets/exercise_details.py
|
DominikPott/practicer
|
1e0f10d3cc9ec17ead067708e3334223fbeb72ea
|
[
"MIT"
] | 3
|
2021-04-18T11:13:25.000Z
|
2021-04-19T16:36:47.000Z
|
practicer/gui/pyside/widgets/exercise_details.py
|
DominikPott/practicer
|
1e0f10d3cc9ec17ead067708e3334223fbeb72ea
|
[
"MIT"
] | null | null | null |
from PySide2 import QtWidgets, QtCore, QtGui
import practicer.gui.pyside.resources  # PySide-compiled Qt resources; provides the icons addressed via ":/icons/..." paths
class ExerciseSpreadSheet(QtWidgets.QWidget):
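    """Detail panel for a single exercise: thumbnail, name, instructions, tutorial links and level stats."""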
def __init__(self, exercise, stats, parent=None):
super(ExerciseSpreadSheet, self).__init__(parent)
self.exercise = exercise
self._stats = stats
self.thumbnail = QtWidgets.QLabel()
self.thumbnail.setMinimumSize(640, 360)
self.exercise_name = QtWidgets.QLabel()
self.exercise_name.setAlignment(QtCore.Qt.AlignCenter)
self.exercise_name.setFixedHeight(50)
font = QtGui.QFont("Times", 12, QtGui.QFont.DemiBold)
font.setCapitalization(QtGui.QFont.Capitalize)
self.exercise_name.setFont(font)
self.instructionsGroup = QtWidgets.QGroupBox("Instructions:")
self.instruction = QtWidgets.QLabel()
self.instruction.setMinimumHeight(100)
self.instruction.setAlignment(QtCore.Qt.AlignLeading)
self.instruction.setFont(QtGui.QFont("Times", 10))
self.instruction.setWordWrap(True)
self.instructionsLayout = QtWidgets.QVBoxLayout()
self.instructionsLayout.addWidget(self.instruction)
self.instructionsGroup.setLayout(self.instructionsLayout)
self.linksGroup = QtWidgets.QGroupBox("Tutorials:")
self.links = QtWidgets.QLabel()
self.links.setAlignment(QtCore.Qt.AlignLeading)
self.links.setOpenExternalLinks(True)
self.linksLayout = QtWidgets.QVBoxLayout()
self.linksLayout.addWidget(self.links)
self.linksGroup.setLayout(self.linksLayout)
self.statsGroup = QtWidgets.QGroupBox("Level:")
self.statsGroup.setFixedWidth(250)
self.stats = StatsWidget(stats=self._stats)
self.statsLayout = QtWidgets.QVBoxLayout()
self.statsLayout.addWidget(self.stats)
self.statsGroup.setLayout(self.statsLayout)
self.additional = QtWidgets.QWidget()
self.additional.setFixedHeight(160)
self.additional_layout = QtWidgets.QHBoxLayout()
self.additional_layout.addWidget(self.statsGroup)
self.additional_layout.addWidget(self.linksGroup)
self.additional_layout.setMargin(0)
self.additional.setLayout(self.additional_layout)
self.setLayout(QtWidgets.QVBoxLayout())
self.layout().addWidget(self.exercise_name)
self.layout().addWidget(self.thumbnail)
self.layout().addWidget(self.instructionsGroup)
self.layout().addWidget(self.additional)
self._refresh()
def _refresh(self):
self.exercise_name.setText(self.exercise.get("label", "No Label"))
self._update_thumbnail()
self.instruction.setText(self.exercise.get("instruction", "No Instructions"))
self.links.setText(self._format_hyperlinks())
self.stats.refresh(self._stats)
def _format_hyperlinks(self):
links = self.exercise.get("hyperlinks", [""])
formated_links = [
"<a href='{link}' style='color: gray;'>{short_link}</a >".format(link=link, short_link=link[:30]) for link
in links]
return "<br>".join(formated_links)
def _update_thumbnail(self):
thumbnail_path = self.exercise.get("thumbnail", "")
thumbnail = QtGui.QPixmap(thumbnail_path).scaled(640, 360, QtCore.Qt.AspectRatioMode.KeepAspectRatio,
QtCore.Qt.TransformationMode.SmoothTransformation)
self.thumbnail.setPixmap(thumbnail)
self.thumbnail.setScaledContents(True)
def refresh(self, exercise, stats):
self.exercise = exercise
self._stats = stats
self._refresh()
class StatsWidget(QtWidgets.QWidget):
def __init__(self, stats, parent=None):
super(StatsWidget, self).__init__(parent=parent)
self._stats = stats
self.level_progress = QtWidgets.QProgressBar()
self.level = LevelWidget()
self.setLayout(QtWidgets.QVBoxLayout())
self.layout().addWidget(self.level)
self.layout().addWidget(self.level_progress)
self.refresh(stats=self._stats)
def refresh(self, stats):
self._stats = stats
self.level.refresh(self._stats.get("level", 0))
self.level_progress.setRange(0, self._stats.get("level_max_progress", 100))
self.level_progress.setValue(self._stats.get("progress", 0.0))
class LevelWidget(QtWidgets.QWidget):
    MAX_LEVELS = 5  # TODO: this is business logic; move it into the API
ENABLED = QtGui.QImage(":/icons/enabled.png")
DISABLED = QtGui.QImage(":/icons/disabled.png")
def __init__(self, level=0, parent=None):
super(LevelWidget, self).__init__(parent=parent)
self.level = level
self.setLayout(QtWidgets.QHBoxLayout())
self.level_icons = []
def populate(self):
self._clear()
        for index in range(1, self.MAX_LEVELS + 1):
image = self.ENABLED if index <= self.level else self.DISABLED
icon = self._build_icon(pixmap=QtGui.QPixmap.fromImage(image))
self.level_icons.append(icon)
self.layout().addWidget(icon)
def _build_icon(self, pixmap):
container = QtWidgets.QLabel()
container.setPixmap(pixmap)
container.setFixedSize(32, 32)
container.setScaledContents(True)
return container
def refresh(self, level):
self.level = level
self.populate()
def _clear(self):
for widget in self.level_icons:
self.layout().removeWidget(widget)
widget.clear()
self.level_icons = []
| 39.103448
| 118
| 0.669841
|
794e7371166f7cb980dfa43dbd5c112311721162
| 1,712
|
py
|
Python
|
apps/readux/urls.py
|
ecds/readux
|
4eac8b48efef8126f4f2be28b5eb943c85a89c2e
|
[
"Apache-2.0"
] | 18
|
2017-06-12T09:58:02.000Z
|
2021-10-01T11:14:34.000Z
|
apps/readux/urls.py
|
ecds/readux
|
4eac8b48efef8126f4f2be28b5eb943c85a89c2e
|
[
"Apache-2.0"
] | 276
|
2019-04-26T20:13:01.000Z
|
2022-03-31T10:26:28.000Z
|
apps/readux/urls.py
|
ecds/readux
|
4eac8b48efef8126f4f2be28b5eb943c85a89c2e
|
[
"Apache-2.0"
] | 7
|
2018-03-13T23:44:26.000Z
|
2021-09-15T17:54:55.000Z
|
"""URL patterns for the Readux app"""
from django.urls import path
from . import views, annotations
from .search import SearchManifestCanvas
urlpatterns = [
path('collection/', views.CollectionsList.as_view(), name='collections list'),
path('volume/', views.VolumesList.as_view(), name='volumes list'),
path('collection/<collection>/', views.CollectionDetail.as_view(), name="collection"),
path('volume/<volume>', views.VolumeDetail.as_view(), name='volume'),
path('volume/<volume>/page/all', views.PageDetail.as_view(), name='volumeall'),
# url for page altered to prevent conflict with Wagtail
# TODO: find another way to resolve this conflict
path('volume/<volume>/page/<page>', views.PageDetail.as_view(), name='page'),
path('volume/<volume>/export', views.ExportOptions.as_view(), name='export'),
path(
'volume/<volume>/<filename>/export_download',
views.ExportDownload.as_view(),
name='export_download'
),
path(
'volume/<filename>/export_download_zip',
views.ExportDownloadZip.as_view(),
name='export_download_zip'
),
path('annotations/', annotations.Annotations.as_view(), name='post_user_annotations'),
path(
'annotations/<username>/<volume>/list/<canvas>',
annotations.Annotations.as_view(),
name='user_annotations'
),
path('annotations-crud/', annotations.AnnotationCrud.as_view(), name='crud_user_annotation'),
path('search/', views.VolumeSearch.as_view(), name='search'),
path('_anno_count/<volume>/<page>', views.AnnotationsCount.as_view(), name='_anno_count'),
path('search/volume/pages', SearchManifestCanvas.as_view(), name='search_pages'),
]
| 46.27027
| 97
| 0.689836
|
794e769e2c86295b3a4eaea3770ec5a9df5aee46
| 7,387
|
py
|
Python
|
src/.history/Test2/HiwinRT605_Strategy_test_v2_20190628092449.py
|
SamKaiYang/2019_Hiwin_Shaking
|
d599f8c87dc4da89eae266990d12eb3a8b0f3e16
|
[
"MIT"
] | null | null | null |
src/.history/Test2/HiwinRT605_Strategy_test_v2_20190628092449.py
|
SamKaiYang/2019_Hiwin_Shaking
|
d599f8c87dc4da89eae266990d12eb3a8b0f3e16
|
[
"MIT"
] | null | null | null |
src/.history/Test2/HiwinRT605_Strategy_test_v2_20190628092449.py
|
SamKaiYang/2019_Hiwin_Shaking
|
d599f8c87dc4da89eae266990d12eb3a8b0f3e16
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# license removed for brevity
# Strategy: the robot arm cycles back and forth through four points
import threading
import time
import rospy
import os
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import math
import enum
#import Hiwin_RT605_ArmCommand_Socket as ArmTask
import Hiwin_RT605_Arm_Command_v2 as ArmTask # 0628
from std_msgs.msg import Int32MultiArray
##----Arm state-----------
Arm_state_flag = 0
Strategy_flag = 0
Sent_data_flag = True
##----Arm status enum
class Arm_status(enum.IntEnum):
Idle = 0
Isbusy = 1
Error = 2
shutdown = 6
#-----------server feedback arm state----------
# def Arm_state(req):
# global CurrentMissionType,Strategy_flag,Arm_state_flag
# Arm_state_flag = int('%s'%req.Arm_state)
# if Arm_state_flag == Arm_status.Isbusy: # the arm is busy
# Strategy_flag = False
# return(1)
# if Arm_state_flag == Arm_status.Idle: # the arm is ready
# Strategy_flag = True
# return(0)
# if Arm_state_flag == Arm_status.shutdown: # the program was interrupted
# Strategy_flag = 6
# return(6)
# ##-----------server feedback Sent_flag----------
# def Sent_flag(req):
# global Sent_data_flag
# Sent_data_flag = int('%s'%req.sent_flag)
# return(1)
def callback(state):
global Arm_state_flag,Sent_data_flag
# rospy.loginfo(rospy.get_caller_id() + "I heard %s", data.data)
Arm_state_flag = state.data[0]
Sent_data_flag = state.data[1]
#print(state.data)
def arm_state_listener():
#rospy.init_node(NAME)
#s = rospy.Service('arm_state',arm_state, Arm_state) ##server arm state
#a = rospy.Service('sent_flag',sent_flag,Sent_flag)
#rospy.spin() ## spin one
rospy.Subscriber("chatter", Int32MultiArray, callback)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # PEP 479: just end the generator; raising StopIteration would become a RuntimeError on Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
##------------class-------
class point():
def __init__(self,x,y,z,pitch,roll,yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##-------------------------strategy---------------------
action = 0
def Mission_Trigger():
global action,Arm_state_flag,Sent_data_flag
#print("Arm :",Arm_state_flag)
#print("Sent:",Sent_data_flag)
if Arm_state_flag == Arm_status.Idle and Sent_data_flag == 1:
Sent_data_flag = 0
Arm_state_flag = Arm_status.Isbusy
        for case in switch(action):  # send the command for the chosen arm motion over the socket
if case(0):
pos.x = 10
pos.y = 36.8
pos.z = 11.35
pos.pitch = -90
pos.roll = 0
pos.yaw = 0
action = 1
print('x: ',pos.x,' y: ',pos.y,' z: ',pos.z,' pitch: ',pos.pitch,' roll: ',pos.roll,' yaw: ',pos.yaw)
ArmTask.strategy_client_pos_move(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
ArmTask.strategy_client_Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
# ArmTask.point_data(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
# ArmTask.Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
break
if case(1):
pos.x = 10
pos.y = 42
pos.z = 11.35
pos.pitch = -90
pos.roll = 0
pos.yaw = 0
action = 2
print('x: ',pos.x,' y: ',pos.y,' z: ',pos.z,' pitch: ',pos.pitch,' roll: ',pos.roll,' yaw: ',pos.yaw)
ArmTask.strategy_client_pos_move(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
ArmTask.strategy_client_Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
# ArmTask.point_data(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
# ArmTask.Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
break
if case(2):
pos.x = -10
pos.y = 42
pos.z = 11.35
pos.pitch = -90
pos.roll = 0
pos.yaw = 0
action = 3
print('x: ',pos.x,' y: ',pos.y,' z: ',pos.z,' pitch: ',pos.pitch,' roll: ',pos.roll,' yaw: ',pos.yaw)
ArmTask.strategy_client_pos_move(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
ArmTask.strategy_client_Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
# ArmTask.point_data(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
# ArmTask.Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
break
if case(3):
pos.x = -10
pos.y = 36.8
pos.z = 11.35
pos.pitch = -90
pos.roll = 0
pos.yaw = 0
action = 4
print('x: ',pos.x,' y: ',pos.y,' z: ',pos.z,' pitch: ',pos.pitch,' roll: ',pos.roll,' yaw: ',pos.yaw)
ArmTask.strategy_client_pos_move(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
ArmTask.strategy_client_Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
# ArmTask.point_data(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
# ArmTask.Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
break
if case(4):
pos.x = 0
pos.y = 36.8
pos.z = 11.35
pos.pitch = -90
pos.roll = 0
pos.yaw = 0
action = 0
print('x: ',pos.x,' y: ',pos.y,' z: ',pos.z,' pitch: ',pos.pitch,' roll: ',pos.roll,' yaw: ',pos.yaw)
ArmTask.strategy_client_pos_move(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
ArmTask.strategy_client_Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
# ArmTask.point_data(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
# ArmTask.Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
break
if case(): # default, could also just omit condition or 'if True'
rospy.on_shutdown(myhook)
ArmTask.rospy.on_shutdown(myhook)
    # action : ptp / line
    # ra     : abs / rel
    # grip   : gripper
    # vel    : speed
    # both   : Ctrl_Mode
##-------------strategy end ------------
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
argv = rospy.myargv()
rospy.init_node('strategy', anonymous=True)
GetInfoFlag = True #Test no data
arm_state_listener()
    start_input = int(input('Press 1 to start the strategy, 3 to exit: '))  # read the start command
    start_input = 1  # NOTE: hard-coded override of the prompt above
if start_input==1:
while 1:
            #time.sleep(0.3)  # 0627: most stable with a 0.3 s delay
# my_list=[]
# for i in range(2500000):
# my_list.append(i)
Mission_Trigger()
if start_input == 3:
pass
#timer.join()
ArmTask.rospy.spin()
rospy.spin()
| 36.389163
| 117
| 0.542981
|
794e76b2a2c5085d1808cb08b6a0576a2a223d54
| 7,054
|
py
|
Python
|
rxcs/ana/SNR.py
|
JacekPierzchlewski/RxCS
|
250f9ebfe9c12f49754f354e60cc511be76e4632
|
[
"BSD-2-Clause"
] | 3
|
2015-02-10T17:57:33.000Z
|
2017-03-16T00:46:45.000Z
|
rxcs/ana/SNR.py
|
JacekPierzchlewski/RxCS
|
250f9ebfe9c12f49754f354e60cc511be76e4632
|
[
"BSD-2-Clause"
] | null | null | null |
rxcs/ana/SNR.py
|
JacekPierzchlewski/RxCS
|
250f9ebfe9c12f49754f354e60cc511be76e4632
|
[
"BSD-2-Clause"
] | null | null | null |
"""|
This module contains the SNR evaluation function for reconstructed signals. |br|
*Examples*:
Please go to the *examples/analysis* directory for examples
on how to use the SNR analysis modules. |br|
*Settings*:
Parameters of the SNR analysis are described below.
Take a look on '__inputSignals' function for more info on the
parameters.
Parameters of the SNR analysis are attributes of the class which
must/can be set before the analysis is run.
Required parameters:
- a. **mSig** (*2D Numpy array*): list with signals to be tested
- b. **mSigRef** (*2D Numpy array*): list with reference signals
Optional parameters:
- c. **strComment** (*string*): an optional comment to the name of
the SNR analysis module
- d. **iSNRSuccess** (*float*): success threshold. SNR over this
threshold is treated as a successful
reconstruction [default = not given]
- e. **bMute** (*int*): mute the console output from the sampler [default = 0]
*Output*:
Description of the SNR analysis output is below.
This is the list of attributes of the class which are available after
calling the 'run' method:
- a. **iSNR** (*float*): the average SNR
- b. **vSNR** (*float*): SNR for every signal
- c. **iSR** (*float*): average success ratio
- d. **vSuccessBits** (*float*): list with success flags for every signal
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <jap@es.aau.dk>
*Version*:
0.1 | 20-MAY-2014 : * Initial version. |br|
0.2 | 21-MAY-2014 : * Success Ratio computation is added. |br|
0.3 | 21-MAY-2014 : * Docstrings added. |br|
0.4 | 21-MAY-2014 : * Configuration with a dictionary |br|
0.5 | 21-MAY-2014 : * Progress and results printing |br|
1.0 | 21-MAY-2014 : * Version 1.0 released. |br|
    2.0 | 21-AUG-2015 : * Version 2.0 (objectified version) is released. |br|
2.0r1 | 25-AUG-2015 : * Improvements in headers |br|
    2.1 | 09-SEP-2015 : * Optional comment to the name was added |br|
*License*:
BSD 2-Clause
"""
from __future__ import division
import numpy as np
import rxcs
class SNR(rxcs._RxCSobject):
def __init__(self, *args):
rxcs._RxCSobject.__init__(self) # Make it a RxCS object
self.strRxCSgroup = 'Analysis' # Name of group of RxCS modules
self.strModuleName = 'SNR' # Module name
self.__inputSignals() # Define the input signals
self.__parametersDefine() # Define the parameters
# Define parameters
def __inputSignals(self):
# Signal under test
self.paramAddMan('mSig', 'Signal under test', noprint=1)
self.paramType('mSig', np.ndarray) # Must be a Numpy array
self.paramTypeEl('mSig', (int, float)) # Elements must be of float or int type
self.paramNDimLE('mSig', 2) # Must be a 1, or 2 dimensional matrix
# Reference signal
self.paramAddMan('mSigRef', 'Reference signal', noprint=1)
self.paramType('mSigRef', np.ndarray) # Must be a Numpy array
self.paramTypeEl('mSigRef', (int, float)) # Elements must be of float or int type
self.paramNDimLE('mSigRef', 2) # Must be a 1, or 2 dimensional matrix
self.paramDimEq('mSigRef', 'mSig', 'rows', 'rows') # Must have shape equal to mSig
self.paramDimEq('mSigRef', 'mSig', 'columns', 'columns') # ^
# Define parameters
def __parametersDefine(self):
# Success threshold
self.paramAddOpt('iSNRSuccess', 'Success threshold')
        self.paramType('iSNRSuccess', (int, float))  # Must be an int or a float
self.paramH('iSNRSuccess', -np.inf)
self.paramL('iSNRSuccess', np.inf)
# Additional comment in printing
self.paramAddOpt('strComment', 'Additional comment in printing', noprint=1, default='')
        self.paramType('strComment', str)  # Must be a string
# 'Mute the output' flag
self.paramAddOpt('bMute', 'Mute the output', noprint=1, default=0)
self.paramType('bMute', int) # Must be of int type
self.paramAllowed('bMute',[0, 1]) # It can be either 1 or 0
# Run
def run(self):
        self.parametersCheck()  # Check if all the needed parameters are in place and are correct
self.addComment2Name() # Add a comment to the name of the SNR analysis, if needed
self.parametersPrint() # Print the values of parameters
self.engineStartsInfo() # Info that the engine starts
self.__engine() # Run the engine
self.engineStopsInfo() # Info that the engine ends
return self.__dict__ # Return dictionary with the parameters
# Add a comment to the name of the module, if needed
def addComment2Name(self):
if not (self.strComment == ''):
if not 'strModuleName_' in self.__dict__:
self.strModuleName_ = self.strModuleName
self.strModuleName = self.strModuleName_ + ' [' + self.strComment + ']'
self.strComment = ''
return
# Engine - compute the noise and the success rate
def __engine(self):
# Make the 2D matrces with signals under test and observed signals
self.mSig = self.makeArray2Dim(self.mSig)
self.mSigRef = self.makeArray2Dim(self.mSigRef)
# Get the number of signals and the size of signals
        (nSigs, iSizSig) = self.mSig.shape  # Number of signals and their length
# Compute the noise
mNoise = np.abs(self.mSig - self.mSigRef)
(_, iSizNoise) = mNoise.shape # Size of the noise
# Compute the power of noise
vNoiseP = (np.sum(mNoise**2, axis=1) / iSizSig)
# Compute the power of reference signals
vSigP = (np.sum(self.mSigRef**2, axis=1) / iSizSig)
# Compute the SNR for every reconstructed signal and the average SNR
self.vSNR = 10 * np.log10(vSigP / vNoiseP)
self.iSNR = self.vSNR.mean()
# Compute the success for every reconstructed signal and the success ratio
self.iSR = np.nan
if self.wasParamGiven('iSNRSuccess'):
self.vSuccessBits = (self.vSNR >= self.iSNRSuccess)
self.iSR = self.vSuccessBits.mean()
# Print results
if self.bMute == 0:
self._printResults(self.iSNR, self.iSR, self.iSNRSuccess)
return
# Print the results of analysis
def _printResults(self, iSNR, iSR, iSNRSuccess):
        rxcs.console.bullet_param('The average SNR of the reconstruction',
                                  iSNR, '-', 'dB')
if self.wasParamGivenVal(self.iSR):
rxcs.console.param('The Success Ratio', iSR, ' ', '')
rxcs.console.param('(success threshold)', iSNRSuccess, '-', 'dB')
return
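# A minimal usage sketch (hypothetical data; the shipped examples live in the
# examples/analysis directory). Attribute and result names follow the module
# docstring above; the `rxcs.ana.SNR` access path is an assumption about how
# the package exposes this class:
#
#     import numpy as np
#     import rxcs
#     mSigRef = np.random.randn(5, 1000)                # 5 reference signals
#     mSig = mSigRef + 0.01 * np.random.randn(5, 1000)  # noisy reconstructions
#     ana = rxcs.ana.SNR()
#     ana.mSig = mSig
#     ana.mSigRef = mSigRef
#     ana.iSNRSuccess = 15      # [dB] optional success threshold
#     ana.run()
#     print(ana.iSNR, ana.iSR)  # average SNR and success ratio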
| 37.924731
| 100
| 0.612418
|
794e76d9d20b394b433c41aa35e96783e3b85d90
| 2,022
|
py
|
Python
|
system/result_printer.py
|
hz512/Smart-Parking-Enforcement-System
|
e990903de545693ad6e2536bf167c69ab672d16a
|
[
"MIT"
] | null | null | null |
system/result_printer.py
|
hz512/Smart-Parking-Enforcement-System
|
e990903de545693ad6e2536bf167c69ab672d16a
|
[
"MIT"
] | null | null | null |
system/result_printer.py
|
hz512/Smart-Parking-Enforcement-System
|
e990903de545693ad6e2536bf167c69ab672d16a
|
[
"MIT"
] | null | null | null |
import os
class Printer:
def __init__(self,
features=None,
width=None):
        if features is None:
            features = ['slot_id', 'status', 'duration', 'violation', 'msg']
        if width is None:
            width = [20, 10, 10, 10, 30]
self.features = features
self.width = width
def print(self, records):
info = []
for record in records:
line = [record[column] for column in self.features]
info.append(line)
self._pprint(info)
def _pprint(self, slot_status):
os.system('cls||clear')
self._print_msg_box('Parking monitoring in progress',
title='Parking time analysis',
indent=4,
width=sum(self.width))
line_format = '\t'
for i, w in enumerate(self.width):
line_format += '| {{{}: <{}}} |'.format(i, w)
header = line_format.format(*self.features)
print('\t' + '-' * len(header))
print(header)
print('\t' + '-' * len(header))
for line in slot_status:
#print(line)
string = line_format.format(*line)
print(string)
print('\t' + '-' * len(header))
@classmethod
def _print_msg_box(cls, msg, indent=1, width=None, title=None, padding='\t'):
"""Print message-box with optional title."""
lines = msg.split('\n')
space = " " * indent
if not width:
width = max(map(len, lines))
box = padding + f'╔{"═" * (width + indent * 2)}╗\n' # upper_border
if title:
box += padding + f'║{space}{title:<{width}}{space}║\n' # title
box += padding + f'║{space}{"-" * width:<{width}}{space}║\n' # underscore
box += ''.join([padding + f'║{space}{line:<{width}}{space}║\n' for line in lines])
box += padding + f'╚{"═" * (width + indent * 2)}╝' # lower_border
print(box)
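# A minimal usage sketch (hypothetical record; each record dict must contain
# every key listed in `features`):
#
#     printer = Printer()
#     printer.print([{'slot_id': 'A-01', 'status': 'occupied', 'duration': 42,
#                     'violation': 'yes', 'msg': 'exceeded time limit'}])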
| 38.150943
| 91
| 0.487636
|
794e76e9d949cf08c76f8279791e67fee228fca9
| 3,749
|
py
|
Python
|
watertap3/watertap3/wt_units/basic_unit.py
|
kurbansitterley/WaterTAP3
|
8f4493182a39e3ba180019aba02249916dbae500
|
[
"BSD-3-Clause"
] | null | null | null |
watertap3/watertap3/wt_units/basic_unit.py
|
kurbansitterley/WaterTAP3
|
8f4493182a39e3ba180019aba02249916dbae500
|
[
"BSD-3-Clause"
] | 34
|
2021-06-25T17:54:12.000Z
|
2021-06-25T17:54:27.000Z
|
watertap3/watertap3/wt_units/basic_unit.py
|
kurbansitterley/WaterTAP3
|
8f4493182a39e3ba180019aba02249916dbae500
|
[
"BSD-3-Clause"
] | 4
|
2021-06-25T18:32:31.000Z
|
2022-03-24T20:24:18.000Z
|
from pyomo.environ import Block, Expression, units as pyunits
from watertap3.utils import cost_curves, financials
from watertap3.wt_units.wt_unit import WT3UnitProcess
## REFERENCE: ADD REFERENCE HERE
module_name = 'basic_unit'
tpec_or_tic = 'TPEC'
class UnitProcess(WT3UnitProcess):
def fixed_cap(self):
'''
:param flow_in: Flow in to basic unit [m3/hr]
:type flow_in: float
'''
time = self.flowsheet().config.time.first()
sys_cost_params = self.parent_block().costing_param
flow_in_m3yr = pyunits.convert(self.flow_in, to_units=(pyunits.m ** 3 / pyunits.year))
if self.unit_process_name == "tramp_oil_tank":
disposal_cost = 0.000114 # Kiran's disposal cost assumption $/m3
self.costing.other_var_cost = flow_in_m3yr * disposal_cost * sys_cost_params.plant_cap_utilization
if self.kind == 'flow':
flow_basis = self.basis * (pyunits.m ** 3 / pyunits.hour)
flow_factor = self.flow_in / flow_basis
basic_cap = self.cap_basis * flow_factor ** self.cap_exp
return basic_cap
if self.kind == 'mass':
constituents = self.config.property_package.component_list
mass_basis = self.basis * (pyunits.kg / pyunits.hour)
mass_in = 0
for constituent in constituents:
mass_in += self.conc_mass_in[time, constituent]
density = 0.6312 * mass_in + 997.86
total_mass_in = density * self.flow_in
mass_factor = total_mass_in / mass_basis
basic_cap = self.cap_basis * mass_factor ** self.cap_exp
return basic_cap
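    # Worked example of the flow-based cost curve above (hypothetical numbers):
    # with basis = 100 m3/hr, cap_basis = 2.5 and cap_exp = 0.7, a 200 m3/hr
    # unit costs 2.5 * (200 / 100) ** 0.7 ~= 4.06, i.e. less than double; an
    # exponent below 1 encodes economies of scale.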
def elect(self):
'''
Electricity intensity for basic units.
:return: Electricity intensity [kWh/m3]
'''
if self.unit_process_name in ['mbr_nitrification', 'mbr_denitrification'] and not self.case_specific:
# Electricity consumption for MBRs from:
# "Assessing Location and Scale of Urban Nonpotable Water Reuse Systems for Life-Cycle Energy Consumption and Greenhouse Gas Emissions" Kavvada et al (2016)
# Equation located in SI
return 9.5 * self.flow_in ** -0.3
else:
return self.elect_intensity
def get_costing(self, unit_params=None, year=None):
'''
Initialize the unit in WaterTAP3.
'''
time = self.flowsheet().config.time.first()
self.flow_in = pyunits.convert(self.flow_vol_in[time], to_units=pyunits.m ** 3 / pyunits.hr)
self.unit_process_name = unit_params['unit_process_name']
if 'case_specific' in unit_params.keys():
self.case_specific = unit_params['case_specific']
self.basis, self.cap_basis, self.cap_exp, self.elect_intensity, self.basis_year, self.kind = cost_curves.basic_unit(self.unit_process_name, case_specific=self.case_specific)
else:
self.case_specific = False
self.basis, self.cap_basis, self.cap_exp, self.elect_intensity, self.basis_year, self.kind = cost_curves.basic_unit(self.unit_process_name)
self.chem_dict = {}
financials.create_costing_block(self, self.basis_year, tpec_or_tic)
self.deltaP_outlet.unfix()
self.deltaP_waste.unfix()
self.pressure_out.fix(1)
self.pressure_waste.fix(1)
self.costing.fixed_cap_inv_unadjusted = Expression(expr=self.fixed_cap(),
doc='Unadjusted fixed capital investment')
self.electricity = Expression(expr=self.elect(),
doc='Electricity intensity [kwh/m3]')
financials.get_complete_costing(self.costing)
| 44.105882
| 185
| 0.644705
|
794e782971f595fde9db73666f0625c9f1366d91
| 1,767
|
py
|
Python
|
message/tests/test_message.py
|
ThePokerFaCcCe/messenger
|
2db3d5c2ccd05ac40d2442a13d664ca9ad3cb14c
|
[
"MIT"
] | null | null | null |
message/tests/test_message.py
|
ThePokerFaCcCe/messenger
|
2db3d5c2ccd05ac40d2442a13d664ca9ad3cb14c
|
[
"MIT"
] | null | null | null |
message/tests/test_message.py
|
ThePokerFaCcCe/messenger
|
2db3d5c2ccd05ac40d2442a13d664ca9ad3cb14c
|
[
"MIT"
] | null | null | null |
from django.test.testcases import TestCase
from user.tests.utils import create_active_user
from conversation.tests.utils import create_private_chat
from message.models import Message
from message.queryset import get_chat_messages
from .utils import create_deleted_msg, create_message
from core.tests.utils import assert_items_are_same_as_data
class MessageModelTest(TestCase):
def setUp(self) -> None:
self.user1 = create_active_user()
self.user2 = create_active_user()
self.pv = create_private_chat(self.user1, self.user2)
def test_deleted_msgs_not_in_chat_queryset(self):
msg1 = create_message(self.user1, self.pv)
msg2 = create_message(self.user1, self.pv)
msg3 = create_message(self.user2, self.pv)
assert_items_are_same_as_data(
items=[msg1.pk, msg2.pk, msg3.pk],
data=get_chat_messages(self.pv.pk, self.user1.pk).values(),
data_key='id'
)
msg2.soft_delete()
assert_items_are_same_as_data(
items=[msg1.pk, msg3.pk],
data=get_chat_messages(self.pv.pk, self.user1.pk).values(),
data_key='id'
)
assert_items_are_same_as_data(
items=[msg1.pk, msg3.pk],
data=get_chat_messages(self.pv.pk, self.user2.pk).values(),
data_key='id'
)
create_deleted_msg(msg3, self.user1)
assert_items_are_same_as_data(
items=[msg1.pk],
data=get_chat_messages(self.pv.pk, self.user1.pk).values(),
data_key='id'
)
assert_items_are_same_as_data(
items=[msg1.pk, msg3.pk],
data=get_chat_messages(self.pv.pk, self.user2.pk).values(),
data_key='id'
)
| 32.127273
| 71
| 0.647425
|
794e785f410604ab58cbd85ab22417259c4857c8
| 13,883
|
py
|
Python
|
tests/open_alchemy/schemas/validation/test_unique_secondary.py
|
MihailMiller/OpenAlchemy
|
55b751c58ca50706ebc46262f50addb7dec34278
|
[
"Apache-2.0"
] | 40
|
2019-11-05T06:50:35.000Z
|
2022-03-09T01:34:57.000Z
|
tests/open_alchemy/schemas/validation/test_unique_secondary.py
|
MihailMiller/OpenAlchemy
|
55b751c58ca50706ebc46262f50addb7dec34278
|
[
"Apache-2.0"
] | 178
|
2019-11-03T04:10:38.000Z
|
2022-03-31T00:07:17.000Z
|
tests/open_alchemy/schemas/validation/test_unique_secondary.py
|
MihailMiller/OpenAlchemy
|
55b751c58ca50706ebc46262f50addb7dec34278
|
[
"Apache-2.0"
] | 17
|
2019-11-04T07:22:46.000Z
|
2022-03-23T05:29:49.000Z
|
"""Tests for spec unique tablename."""
import pytest
from open_alchemy.schemas import validation
CHECK_TESTS = [
pytest.param({"Schema1": {}}, True, None, id="single schema not constructable"),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {"prop_1": {"type": "integer"}},
}
},
True,
None,
id="single schema single property not many-to-many",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"prop_1": {
"type": "array",
"items": {"x-secondary": "association_1"},
}
},
}
},
True,
None,
id="single schema single property many-to-many",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"prop_1": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
"prop_2": {
"type": "array",
"items": {"x-secondary": "association_2"},
},
},
}
},
True,
None,
id="single schema multiple property many-to-many different secondary",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"prop_1": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
"prop_2": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
},
}
},
False,
("Schema1", "prop_2", "association_1", "Schema1", "prop_1"),
id="single schema multiple property many-to-many same secondary",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"prop_1": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
"prop_2": {
"type": "array",
"items": {"x-secondary": "association_2"},
},
"prop_3": {
"type": "array",
"items": {"x-secondary": "association_3"},
},
},
}
},
True,
None,
id="single schema many property many-to-many different secondary",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"prop_1": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
"prop_2": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
"prop_3": {
"type": "array",
"items": {"x-secondary": "association_3"},
},
},
}
},
False,
("Schema1", "prop_2", "association_1", "Schema1", "prop_1"),
id="single schema many property many-to-many same secondary first",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"prop_1": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
"prop_2": {
"type": "array",
"items": {"x-secondary": "association_2"},
},
"prop_3": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
},
}
},
False,
("Schema1", "prop_3", "association_1", "Schema1", "prop_1"),
id="single schema many property many-to-many same secondary first and last",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"prop_1": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
"prop_2": {
"type": "array",
"items": {"x-secondary": "association_2"},
},
"prop_3": {
"type": "array",
"items": {"x-secondary": "association_2"},
},
},
}
},
False,
("Schema1", "prop_3", "association_2", "Schema1", "prop_2"),
id="single schema many property many-to-many same secondary last",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"prop_1": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
"prop_2": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
"prop_3": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
},
}
},
False,
("Schema1", "prop_2", "association_1", "Schema1", "prop_2"),
id="single schema many property many-to-many same secondary all",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"prop_1": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
},
},
"Schema2": {
"x-tablename": "schema_2",
"properties": {
"prop_2": {
"type": "array",
"items": {"x-secondary": "association_2"},
},
},
},
},
True,
None,
id="multiple schema single property many-to-many different secondary",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"prop_1": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
},
},
"Schema2": {
"x-tablename": "schema_2",
"properties": {
"prop_2": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
},
},
},
False,
("Schema2", "prop_2", "association_1", "Schema1", "prop_1"),
id="multiple schema single property many-to-many same secondary",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"prop_1": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
},
},
"Schema2": {
"x-tablename": "schema_2",
"properties": {
"prop_2": {
"type": "array",
"items": {"x-secondary": "association_2"},
},
},
},
"Schema3": {
"x-tablename": "schema_3",
"properties": {
"prop_3": {
"type": "array",
"items": {"x-secondary": "association_3"},
},
},
},
},
True,
None,
id="many schema single property many-to-many different secondary",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"prop_1": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
},
},
"Schema2": {
"x-tablename": "schema_2",
"properties": {
"prop_2": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
},
},
"Schema3": {
"x-tablename": "schema_3",
"properties": {
"prop_3": {
"type": "array",
"items": {"x-secondary": "association_3"},
},
},
},
},
False,
("Schema2", "prop_2", "association_1", "Schema1", "prop_1"),
id="many schema single property many-to-many same secondary first",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"prop_1": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
},
},
"Schema2": {
"x-tablename": "schema_2",
"properties": {
"prop_2": {
"type": "array",
"items": {"x-secondary": "association_2"},
},
},
},
"Schema3": {
"x-tablename": "schema_3",
"properties": {
"prop_3": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
},
},
},
False,
("Schema3", "prop_3", "association_1", "Schema1", "prop_1"),
id="many schema single property many-to-many same secondary first last",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"prop_1": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
},
},
"Schema2": {
"x-tablename": "schema_2",
"properties": {
"prop_2": {
"type": "array",
"items": {"x-secondary": "association_2"},
},
},
},
"Schema3": {
"x-tablename": "schema_3",
"properties": {
"prop_3": {
"type": "array",
"items": {"x-secondary": "association_2"},
},
},
},
},
False,
("Schema3", "prop_3", "association_2", "Schema2", "prop_2"),
id="many schema single property many-to-many same secondary last",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"prop_1": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
},
},
"Schema2": {
"x-tablename": "schema_2",
"properties": {
"prop_2": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
},
},
"Schema3": {
"x-tablename": "schema_3",
"properties": {
"prop_3": {
"type": "array",
"items": {"x-secondary": "association_1"},
},
},
},
},
False,
("Schema2", "prop_2", "association_1", "Schema1", "prop_1"),
id="many schema single property many-to-many same secondary all",
),
]
@pytest.mark.schemas
@pytest.mark.validate
@pytest.mark.parametrize("schemas, expected_valid, expected_reasons", CHECK_TESTS)
def test_check(schemas, expected_valid, expected_reasons):
"""
GIVEN schemas and expected result
WHEN check is called with the schemas
THEN the expected result is returned.
"""
returned_result = validation.unique_secondary.check(schemas=schemas)
assert returned_result.valid == expected_valid
if expected_reasons is not None:
for reason in expected_reasons:
assert reason in returned_result.reason
else:
assert returned_result.reason is None
| 31.841743
| 84
| 0.345747
|
794e78f71628c92829ed4fc0d205151128a0b6d4
| 3,525
|
py
|
Python
|
examples/pipeline/feldman_verifiable_sum/pipeline-feldman-verifiable-sum.py
|
hubert-he/FATE
|
6758e150bd7ca7d6f788f9a7a8c8aea7e6500363
|
[
"Apache-2.0"
] | 3,787
|
2019-08-30T04:55:10.000Z
|
2022-03-31T23:30:07.000Z
|
examples/pipeline/feldman_verifiable_sum/pipeline-feldman-verifiable-sum.py
|
hubert-he/FATE
|
6758e150bd7ca7d6f788f9a7a8c8aea7e6500363
|
[
"Apache-2.0"
] | 1,439
|
2019-08-29T16:35:52.000Z
|
2022-03-31T11:55:31.000Z
|
examples/pipeline/feldman_verifiable_sum/pipeline-feldman-verifiable-sum.py
|
hubert-he/FATE
|
6758e150bd7ca7d6f788f9a7a8c8aea7e6500363
|
[
"Apache-2.0"
] | 1,179
|
2019-08-29T16:18:32.000Z
|
2022-03-31T12:55:38.000Z
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader
from pipeline.component import DataIO
from pipeline.component import FeldmanVerifiableSum
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
from pipeline.runtime.entity import JobParameters
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
hosts = parties.host
backend = config.backend
work_mode = config.work_mode
guest_train_data = {"name": "breast_homo_test", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_homo_test", "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role="guest", party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=hosts)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role="host", party_id=hosts).component_param(table=host_train_data)
dataio_0 = DataIO(name="dataio_0")
# get and configure DataIO party instance of guest
dataio_0.get_party_instance(role="guest", party_id=guest).component_param(with_label=False, output_format="dense")
# get and configure DataIO party instance of host
dataio_0.get_party_instance(role="host", party_id=hosts).component_param(with_label=False)
# define FeldmanVerifiableSum components
feldmanverifiablesum_0 = FeldmanVerifiableSum(name="feldmanverifiablesum_0")
feldmanverifiablesum_0.get_party_instance(role="guest", party_id=guest).component_param(sum_cols=[1, 2, 3], q_n=6)
feldmanverifiablesum_0.get_party_instance(role="host", party_id=hosts).component_param(sum_cols=[1, 2, 3], q_n=6)
# add components to pipeline, in order of task execution.
pipeline.add_component(reader_0)
pipeline.add_component(dataio_0, data=Data(data=reader_0.output.data))
pipeline.add_component(feldmanverifiablesum_0, data=Data(data=dataio_0.output.data))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
job_parameters = JobParameters(backend=backend, work_mode=work_mode)
pipeline.fit(job_parameters)
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
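# Usage sketch (the path mirrors this example's default config argument):
#   python pipeline-feldman-verifiable-sum.py -config ../../config.yaml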
| 38.315217
| 118
| 0.743262
|
794e790b6e53e67407a7833c7f4f83bab8625c1d
| 1,820
|
py
|
Python
|
src/quocslib/timeevolution/piecewise_integrator.py
|
marcorossignolo/QuOCS
|
5ed631e2aebc42b226f5992daf27e2da75a89af9
|
[
"Apache-2.0"
] | null | null | null |
src/quocslib/timeevolution/piecewise_integrator.py
|
marcorossignolo/QuOCS
|
5ed631e2aebc42b226f5992daf27e2da75a89af9
|
[
"Apache-2.0"
] | null | null | null |
src/quocslib/timeevolution/piecewise_integrator.py
|
marcorossignolo/QuOCS
|
5ed631e2aebc42b226f5992daf27e2da75a89af9
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from scipy.linalg import expm
# JAX is an optional backend: import it if available, otherwise fall back to
# the SciPy-only path below.
try:
    import jax.scipy as jsp
except ImportError:
    jsp = None
def pw_evolution(U_store, drive, A, B, n_slices, dt):
"""Compute the piecewise evolution of a system defined by the
Hamiltonian H = A + drive * B and store the result in U_store
:param List[np.matrix] U_store: the storage for all of the computed propagators
:param np.array drive: an array of dimension n_controls x n_slices that contains the amplitudes of the pulse
:param np.matrix A: the drift Hamiltonian
:param List[np.matrix] B: the control Hamiltonians
:param int n_slices: number of slices
:param float dt: the duration of each time slice
    :return None: the propagators are written into U_store in place
"""
K = len(B)
for i in range(n_slices):
H = A
for k in range(K):
H = H + drive[k, i] * B[k]
U_store[i] = expm(-1j * dt * H)
return None
def pw_final_evolution(drive, A, B, n_slices, dt, u0):
"""Compute the piecewise evolution of a system defined by the
Hamiltonian H = A + drive * B and concatenate all the propagators
:param List[np.matrix] U_store: the storage for all of the computed propagators
:param np.array drive: an array of dimension n_controls x n_slices that contains the amplitudes of the pulse
:param np.matrix A: the drift Hamiltonian
:param List[np.matrix] B: the control Hamiltonians
:param int n_slices: number of slices
:param np.matrix u0: the initial density matrix to start from
:return np.matrix: the final propagator
"""
K = len(B)
U = u0
for i in range(n_slices):
H = A
for k in range(K):
H = H + drive[k, i] * B[k]
U = expm(-1j * dt * H) @ U
return U
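# A minimal usage sketch (hypothetical single-qubit system, not part of the
# library): drift sigma_z, one sigma_x control, constant unit-amplitude pulse.
#
#     import numpy as np
#     sz = np.array([[1, 0], [0, -1]], dtype=complex)
#     sx = np.array([[0, 1], [1, 0]], dtype=complex)
#     n_slices, dt = 10, 0.1
#     drive = np.ones((1, n_slices))   # shape: n_controls x n_slices
#     U = pw_final_evolution(drive, sz, [sx], n_slices, dt,
#                            np.eye(2, dtype=complex))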
| 35
| 112
| 0.663736
|
794e79b4d19525cab0ee659c77d7a4383a1c962c
| 3,607
|
py
|
Python
|
A3C/options.py
|
Francesco-Sovrano/Generic-Hierarchical-Deep-Reinforcement-Learning-for-Sentiment-Analysis
|
f6845b682176b76c97cbfc4e0d2dc8576e9883cb
|
[
"MIT"
] | 13
|
2018-11-04T16:51:41.000Z
|
2022-01-31T17:41:15.000Z
|
A3C/options.py
|
Francesco-Sovrano/Generic-Hierarchical-Deep-Reinforcement-Learning-for-Sentiment-Analysis
|
f6845b682176b76c97cbfc4e0d2dc8576e9883cb
|
[
"MIT"
] | null | null | null |
A3C/options.py
|
Francesco-Sovrano/Generic-Hierarchical-Deep-Reinforcement-Learning-for-Sentiment-Analysis
|
f6845b682176b76c97cbfc4e0d2dc8576e9883cb
|
[
"MIT"
] | 2
|
2018-12-12T08:45:30.000Z
|
2021-06-27T21:47:18.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def build():
# Common
tf.app.flags.DEFINE_boolean("use_gpu", False, "whether to use the GPU")
tf.app.flags.DEFINE_string("env_type", "sentipolc", "environment type")
tf.app.flags.DEFINE_string("checkpoint_dir", "./checkpoint", "checkpoint directory")
tf.app.flags.DEFINE_string("event_dir", "./events", "events directory")
tf.app.flags.DEFINE_string("log_dir", "./log", "events directory")
tf.app.flags.DEFINE_boolean("show_best_screenshots", True, "whether to save the best matches")
tf.app.flags.DEFINE_boolean("show_all_screenshots", False, "whether to save all the matches")
tf.app.flags.DEFINE_string("test_set_path", "./database/test_set_sentipolc16.csv", "test set")
tf.app.flags.DEFINE_string("training_set_path", "./database/training_set_sentipolc16.csv", "training set")
tf.app.flags.DEFINE_string("emoji_sentiment_lexicon", "./database/Emoji_Sentiment_Data_v1.0.csv", "emoji sentiment lexicon")
tf.app.flags.DEFINE_string("preprocessed_dict", "./database/preprocessed", "vectorized training set")
tf.app.flags.DEFINE_string("translated_lemma_tokens", "./database/translated_lemma_tokens", "cache of translated lemma tokens") # dictionary with translated lemma tokens
tf.app.flags.DEFINE_string("lexeme_sentiment_dict", "./database/lexeme_sentiment_dict", "cache of lexeme_sentiment") # lexeme sentiment dictionary
tf.app.flags.DEFINE_string("test_annotations", "./database/test_annotations", "cache of test_annotations")
tf.app.flags.DEFINE_string("training_annotations", "./database/training_annotations", "cache of training_annotations")
tf.app.flags.DEFINE_string("tagger_path", "./.env2/treetagger", "tagger path")
tf.app.flags.DEFINE_string("nltk_data", './.env2/nltk_data', "nltk data")
tf.app.flags.DEFINE_string("word2vec_path", './.env2/word2vec/cc.it.300.bin', "word2vec data")
tf.app.flags.DEFINE_string("task", "subjective, opos, oneg, ironic, lpos, lneg", "choose a combination of: subjective, opos, oneg, ironic, lpos, lneg")
tf.app.flags.DEFINE_string("granularity", "lemma", "lemma or token")
tf.app.flags.DEFINE_integer("gram_size", 1, "number of tokens/lemma to process at each step")
tf.app.flags.DEFINE_integer("match_count_for_evaluation", 200, "number of matches used for evaluation scores")
tf.app.flags.DEFINE_integer("parallel_size", 8, "parallel thread size")
tf.app.flags.DEFINE_integer("situation_count", 3, "number of partitions considered by the algorithm")
# For training
tf.app.flags.DEFINE_float("gamma", 0.99, "discount factor for rewards") # doesn't work: 0.75
tf.app.flags.DEFINE_integer("local_t_max", 5, "repeat step size") # doesn't work: 10
tf.app.flags.DEFINE_float("entropy_beta", 0.001, "entropy regularization constant")
tf.app.flags.DEFINE_integer("max_time_step", 6*10**6, "max time steps")
tf.app.flags.DEFINE_integer("save_interval_step", 10**4, "saving interval steps")
tf.app.flags.DEFINE_float("rmsp_alpha", 0.99, "decay parameter for rmsprop")
tf.app.flags.DEFINE_float("rmsp_epsilon", 0.1, "epsilon parameter for rmsprop")
tf.app.flags.DEFINE_float("initial_alpha_low", 1e-4, "log_uniform low limit for learning rate")
tf.app.flags.DEFINE_float("initial_alpha_high", 5e-3, "log_uniform high limit for learning rate")
tf.app.flags.DEFINE_float("initial_alpha_log_rate", 0.5, "log_uniform interpolate rate for learning rate")
tf.app.flags.DEFINE_float("grad_norm_clip", 40.0, "gradient norm clipping")
def get():
return tf.app.flags.FLAGS
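

if __name__ == "__main__":
    # A minimal sanity check (a sketch, not part of the original module;
    # assumes TensorFlow 1.x, where tf.app.flags is available).
    build()
    flags = get()
    print("env_type:", flags.env_type)            # default: "sentipolc"
    print("parallel_size:", flags.parallel_size)  # default: 8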
| 69.365385
| 170
| 0.768506
|
794e7b067fca7a338c7c3ad4d8f2ebb65c68785f
| 13,468
|
py
|
Python
|
submodules/dqd/ribs/emitters/opt/_cma_es.py
|
JiangZehua/control-pcgrl3D
|
f9b04e65e1cbf70b7306f4df251450d83c6fb2be
|
[
"MIT"
] | null | null | null |
submodules/dqd/ribs/emitters/opt/_cma_es.py
|
JiangZehua/control-pcgrl3D
|
f9b04e65e1cbf70b7306f4df251450d83c6fb2be
|
[
"MIT"
] | null | null | null |
submodules/dqd/ribs/emitters/opt/_cma_es.py
|
JiangZehua/control-pcgrl3D
|
f9b04e65e1cbf70b7306f4df251450d83c6fb2be
|
[
"MIT"
] | null | null | null |
"""Implementation of CMA-ES that can be used across various emitters.
Adapted from Nikolaus Hansen's pycma:
https://github.com/CMA-ES/pycma/blob/master/cma/purecma.py
"""
import numba as nb
import numpy as np
class DecompMatrix:
"""Maintains a covariance matrix and its eigendecomposition.
CMA-ES requires the inverse square root of the covariance matrix in order to
sample new solutions from a multivariate normal distribution. However,
calculating the inverse square root is an O(n^3) operation because an
eigendecomposition is involved. (n is the dimensionality of the search
space). To amortize the operation to O(n^2) and avoid recomputing, this
class maintains the inverse square root and waits several evals before
recomputing the inverse square root.
Args:
dimension (int): Size of the (square) covariance matrix.
dtype (str or data-type): Data type of the matrix, typically np.float32
or np.float64.
"""
def __init__(self, dimension, dtype):
self.cov = np.eye(dimension, dtype=dtype)
self.eigenbasis = np.eye(dimension, dtype=dtype)
self.eigenvalues = np.ones((dimension,), dtype=dtype)
self.condition_number = 1
self.invsqrt = np.eye(dimension, dtype=dtype) # C^(-1/2)
self.dtype = dtype
# The last evaluation on which the eigensystem was updated.
self.updated_eval = 0
def update_eigensystem(self, current_eval, lazy_gap_evals):
"""Updates the covariance matrix if lazy_gap_evals have passed.
We have attempted to use numba in this method, but since np.linalg.eigh
is the bottleneck, and it is already implemented in BLAS or LAPACK,
numba does not help much (and actually slows things down a bit).
Args:
current_eval (int): The number of solutions the optimizer has
evaluated so far.
lazy_gap_evals (int): The number of evaluations to wait between
covariance matrix updates.
"""
if current_eval <= self.updated_eval + lazy_gap_evals:
return
# Force symmetry.
self.cov = np.maximum(self.cov, self.cov.T)
# Note: eigh returns float64, so we must cast it.
self.eigenvalues, self.eigenbasis = np.linalg.eigh(self.cov)
self.eigenvalues = self.eigenvalues.real.astype(self.dtype)
self.eigenbasis = self.eigenbasis.real.astype(self.dtype)
self.condition_number = (np.max(self.eigenvalues) /
np.min(self.eigenvalues))
self.invsqrt = (self.eigenbasis *
(1 / np.sqrt(self.eigenvalues))) @ self.eigenbasis.T
# Force symmetry.
self.invsqrt = np.maximum(self.invsqrt, self.invsqrt.T)
self.updated_eval = current_eval
class CMAEvolutionStrategy:
"""CMA-ES optimizer for use with emitters.
The basic usage is:
- Initialize the optimizer and reset it.
- Repeatedly:
- Request new solutions with ask()
- Rank the solutions in the emitter (better solutions come first) and pass
them back with tell().
- Use check_stop() to see if the optimizer has reached a stopping
condition, and if so, call reset().
Args:
sigma0 (float): Initial step size.
batch_size (int): Number of solutions to evaluate at a time. If None, we
calculate a default batch size based on solution_dim.
solution_dim (int): Size of the solution space.
weight_rule (str): Method for generating weights. Either "truncation"
(positive weights only) or "active" (include negative weights).
seed (int): Seed for the random number generator.
dtype (str or data-type): Data type of solutions.
"""
def __init__(self, sigma0, batch_size, solution_dim, weight_rule, seed,
dtype):
self.batch_size = (4 + int(3 * np.log(solution_dim))
if batch_size is None else batch_size)
self.sigma0 = sigma0
self.solution_dim = solution_dim
self.dtype = dtype
if weight_rule not in ["truncation", "active"]:
raise ValueError(f"Invalid weight_rule {weight_rule}")
self.weight_rule = weight_rule
# Calculate gap between covariance matrix updates.
num_parents = self.batch_size // 2
*_, c1, cmu = self._calc_strat_params(self.solution_dim, num_parents,
self.weight_rule)
self.lazy_gap_evals = (0.5 * self.solution_dim * self.batch_size *
(c1 + cmu)**-1 / self.solution_dim**2)
# Strategy-specific params -> initialized in reset().
self.current_eval = None
self.mean = None
self.sigma = None
self.pc = None
self.ps = None
self.cov = None
self._rng = np.random.default_rng(seed)
def reset(self, x0):
"""Resets the optimizer to start at x0.
Args:
x0 (np.ndarray): Initial mean.
"""
self.current_eval = 0
self.sigma = self.sigma0
self.mean = np.array(x0, self.dtype)
# Setup evo path variables.
self.pc = np.zeros(self.solution_dim, dtype=self.dtype)
self.ps = np.zeros(self.solution_dim, dtype=self.dtype)
# Setup the covariance matrix.
self.cov = DecompMatrix(self.solution_dim, self.dtype)
def check_stop(self, ranking_values):
"""Checks if the optimization should stop and be reset.
Tolerances come from CMA-ES.
Args:
ranking_values (np.ndarray): Array of objective values of the
solutions, sorted in the same order that the solutions were
sorted when passed to tell().
Returns:
True if any of the stopping conditions are satisfied.
"""
if self.cov.condition_number > 1e14:
return True
# Area of distribution too small.
area = self.sigma * np.sqrt(max(self.cov.eigenvalues))
if area < 1e-11:
return True
# Fitness is too flat (only applies if there are at least 2
# parents).
if (len(ranking_values) >= 2 and
np.abs(ranking_values[0] - ranking_values[-1]) < 1e-12):
return True
return False
@staticmethod
@nb.jit(nopython=True)
def _transform_and_check_sol(unscaled_params, transform_mat, mean,
lower_bounds, upper_bounds):
"""Numba helper for transforming parameters to the solution space."""
solutions = ((transform_mat @ unscaled_params.T).T +
np.expand_dims(mean, axis=0))
out_of_bounds = np.logical_or(
solutions < np.expand_dims(lower_bounds, axis=0),
solutions > np.expand_dims(upper_bounds, axis=0),
)
return solutions, out_of_bounds
def ask(self, lower_bounds, upper_bounds):
"""Samples new solutions from the Gaussian distribution.
Args:
lower_bounds (float or np.ndarray): scalar or (solution_dim,) array
indicating lower bounds of the solution space. Scalars specify
the same bound for the entire space, while arrays specify a
bound for each dimension. Pass -np.inf in the array or scalar to
                indicate an unbounded space.
upper_bounds (float or np.ndarray): Same as above, but for upper
bounds (and pass np.inf instead of -np.inf).
"""
self.cov.update_eigensystem(self.current_eval, self.lazy_gap_evals)
solutions = np.empty((self.batch_size, self.solution_dim),
dtype=self.dtype)
transform_mat = self.cov.eigenbasis * np.sqrt(self.cov.eigenvalues)
# Resampling method for bound constraints -> sample new solutions until
# all solutions are within bounds.
remaining_indices = np.arange(self.batch_size)
while len(remaining_indices) > 0:
unscaled_params = self._rng.normal(
0.0,
self.sigma,
(len(remaining_indices), self.solution_dim),
).astype(self.dtype)
new_solutions, out_of_bounds = self._transform_and_check_sol(
unscaled_params, transform_mat, self.mean, lower_bounds,
upper_bounds)
solutions[remaining_indices] = new_solutions
# Find indices in remaining_indices that are still out of bounds
# (out_of_bounds indicates whether each entry in each solution is
# out of bounds).
out_of_bounds_indices = np.where(np.any(out_of_bounds, axis=1))[0]
remaining_indices = remaining_indices[out_of_bounds_indices]
return np.asarray(solutions)
@staticmethod
@nb.jit(nopython=True)
def _calc_strat_params(solution_dim, num_parents, weight_rule):
"""Calculates weights, mueff, and learning rates for CMA-ES."""
# Create fresh weights for the number of parents found.
if weight_rule == "truncation":
weights = (np.log(num_parents + 0.5) -
np.log(np.arange(1, num_parents + 1)))
total_weights = np.sum(weights)
weights = weights / total_weights
mueff = np.sum(weights)**2 / np.sum(weights**2)
elif weight_rule == "active":
weights = None
# Dynamically update these strategy-specific parameters.
cc = ((4 + mueff / solution_dim) /
(solution_dim + 4 + 2 * mueff / solution_dim))
cs = (mueff + 2) / (solution_dim + mueff + 5)
c1 = 2 / ((solution_dim + 1.3)**2 + mueff)
cmu = min(
1 - c1,
2 * (mueff - 2 + 1 / mueff) / ((solution_dim + 2)**2 + mueff),
)
return weights, mueff, cc, cs, c1, cmu
@staticmethod
@nb.jit(nopython=True)
def _calc_mean(parents, weights):
"""Numba helper for calculating the new mean."""
return np.sum(parents * np.expand_dims(weights, axis=1), axis=0)
@staticmethod
@nb.jit(nopython=True)
def _calc_weighted_ys(parents, old_mean, weights):
"""Calculates y's for use in rank-mu update."""
ys = parents - np.expand_dims(old_mean, axis=0)
return ys * np.expand_dims(weights, axis=1), ys
@staticmethod
@nb.jit(nopython=True)
def _calc_cov_update(cov, c1a, cmu, c1, pc, sigma, rank_mu_update):
"""Calculates covariance matrix update."""
rank_one_update = c1 * np.outer(pc, pc)
return (cov * (1 - c1a - cmu) + rank_one_update * c1 +
rank_mu_update * cmu / (sigma**2))
def tell(self, solutions, num_parents):
"""Passes the solutions back to the optimizer.
Note that while we use numba to optimize certain parts of this function
(in particular the covariance update), we are more cautious about other
parts because the code that uses numba is significantly harder to read
and maintain.
Args:
solutions (np.ndarray): Array of ranked solutions. The user should
have determined some way to rank the solutions, such as by
objective value. It is important that _all_ of the solutions
initially given in ask() are returned here.
num_parents (int): Number of best solutions to select.
"""
self.current_eval += len(solutions)
if num_parents == 0:
return
parents = solutions[:num_parents]
weights, mueff, cc, cs, c1, cmu = self._calc_strat_params(
self.solution_dim, num_parents, self.weight_rule)
damps = (1 + 2 * max(
0,
np.sqrt((mueff - 1) / (self.solution_dim + 1)) - 1,
) + cs)
# Recombination of the new mean.
old_mean = self.mean
self.mean = self._calc_mean(parents, weights)
# Update the evo path.
y = self.mean - old_mean
z = np.matmul(self.cov.invsqrt, y)
self.ps = ((1 - cs) * self.ps +
(np.sqrt(cs * (2 - cs) * mueff) / self.sigma) * z)
left = (np.sum(np.square(self.ps)) / self.solution_dim /
(1 - (1 - cs)**(2 * self.current_eval / self.batch_size)))
right = 2 + 4. / (self.solution_dim + 1)
hsig = 1 if left < right else 0
self.pc = ((1 - cc) * self.pc + hsig * np.sqrt(cc *
(2 - cc) * mueff) * y)
# Adapt the covariance matrix.
weighted_ys, ys = self._calc_weighted_ys(parents, old_mean, weights)
# Equivalent to calculating the outer product of each ys[i] with itself
# and taking a weighted sum of the outer products. Unfortunately, numba
# does not support einsum.
rank_mu_update = np.einsum("ki,kj", weighted_ys, ys)
c1a = c1 * (1 - (1 - hsig**2) * cc * (2 - cc))
self.cov.cov = self._calc_cov_update(self.cov.cov, c1a, cmu, c1,
self.pc, self.sigma,
rank_mu_update)
# Update sigma.
cn, sum_square_ps = cs / damps, np.sum(np.square(self.ps))
self.sigma *= np.exp(
min(1,
cn * (sum_square_ps / self.solution_dim - 1) / 2))
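

# A minimal usage sketch (not part of the original module): the ask/tell loop
# from the class docstring, minimizing the sphere function. All values below
# (dimension, sigma0, seed, iteration count) are illustrative assumptions.
if __name__ == "__main__":
    dim = 10
    es = CMAEvolutionStrategy(sigma0=0.5, batch_size=None, solution_dim=dim,
                              weight_rule="truncation", seed=42,
                              dtype=np.float64)
    es.reset(np.zeros(dim))
    lower, upper = np.full(dim, -np.inf), np.full(dim, np.inf)  # unbounded
    for _ in range(200):
        sols = es.ask(lower, upper)
        objs = np.sum(sols**2, axis=1)   # sphere objective, lower is better
        order = np.argsort(objs)         # rank solutions: best first
        es.tell(sols[order], num_parents=len(sols) // 2)
        if es.check_stop(objs[order]):
            es.reset(np.zeros(dim))      # restart on convergence/stagnation
    print("best objective in last batch:", objs[order][0])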
| 40.812121
| 80
| 0.604396
|
794e7ceba492090ad1d58d9ed53edc74bb440771
| 644
|
py
|
Python
|
convertall.py
|
dholth/wgc
|
4e3bd877420fbacd66e961a32adaf1428e24bd42
|
[
"0BSD"
] | null | null | null |
convertall.py
|
dholth/wgc
|
4e3bd877420fbacd66e961a32adaf1428e24bd42
|
[
"0BSD"
] | null | null | null |
convertall.py
|
dholth/wgc
|
4e3bd877420fbacd66e961a32adaf1428e24bd42
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python
import glob
import wgc2
from pathlib import Path
# Benchmark toggle: flip to True to time recompress() instead of rewrite().
if False:
"""
real 0m51.348s
user 0m40.623s
sys 0m8.666s
"""
outdir = "converted"
for i in glob.glob("wheels/*.whl"):
infile = Path(i)
outfile = Path(outdir).joinpath(infile.name)
wgc2.recompress(i, outfile)
else:
"""
real 1m24.037s
user 1m21.935s
sys 0m1.851s
"""
outdir = "rewritten"
for i in glob.glob("wheels/*.whl"):
infile = Path(i)
outfile = Path(outdir).joinpath(infile.name)
wgc2.rewrite(i, outfile)
"""
$ du -hs converted/ rewritten/
448M converted/
543M rewritten/
"""
| 18.4
| 52
| 0.599379
|
794e8038b98f45f8c11052caf9cbe98d97e08bbe
| 949
|
py
|
Python
|
Logger.py
|
brilliant-ember/YouTube-Channel-Downloader
|
6f17a19066a0deeae7e16e91860d04285cdb2f5d
|
[
"Unlicense"
] | null | null | null |
Logger.py
|
brilliant-ember/YouTube-Channel-Downloader
|
6f17a19066a0deeae7e16e91860d04285cdb2f5d
|
[
"Unlicense"
] | 22
|
2021-11-02T10:27:06.000Z
|
2022-01-08T12:16:40.000Z
|
Logger.py
|
brilliant-ember/YouTube-Channel-Downloader
|
6f17a19066a0deeae7e16e91860d04285cdb2f5d
|
[
"Unlicense"
] | null | null | null |
import logging
from utils import get_now_date
class Log:
def __init__(self, log_file_path):
logging.basicConfig(filename=log_file_path, format='%(levelname)s - %(message)s', level=logging.INFO)
    def log(self, msg: str, level="info", print_log=True) -> None:
        # Supported levels: info, error, warn/warning, critical, debug.
        # Note: basicConfig above sets level=INFO, so debug entries are
        # filtered out unless the root logger level is lowered.
level = level.lower()
msg = f'{get_now_date()} - {msg}'
# print(msg, level)
if level=="info":
logging.info(msg)
elif level == "error":
logging.error(msg)
elif level == "warn" or level == "warning":
logging.warning(msg)
elif level == 'critical':
logging.critical(msg)
elif level == 'debug':
logging.debug(msg)
        else:
            print("Unknown log level, defaulting to info")
            logging.warning("Unknown log level passed; logging the next entry at info level")
            logging.info(msg)
if print_log:
print(msg)
def exit(self):
self.log("Exiting", print_log=True)
logging.shutdown()
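

if __name__ == "__main__":
    # A minimal usage sketch (the log file name below is an assumption,
    # not from the source).
    logger = Log("downloader.log")
    logger.log("Starting channel download")           # defaults to info
    logger.log("Disk space is low", level="warning")
    logger.exit()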
| 27.911765
| 103
| 0.682824
|
794e8088807412a9788164fd7106d6718880e3b4
| 217
|
py
|
Python
|
Sorting/test_distinct.py
|
matt-sm/codility
|
355eef4a1be67f5758faa884cb97a5c0ff65169e
|
[
"MIT"
] | null | null | null |
Sorting/test_distinct.py
|
matt-sm/codility
|
355eef4a1be67f5758faa884cb97a5c0ff65169e
|
[
"MIT"
] | null | null | null |
Sorting/test_distinct.py
|
matt-sm/codility
|
355eef4a1be67f5758faa884cb97a5c0ff65169e
|
[
"MIT"
] | null | null | null |
import pytest
import distinct
testdata = [([2, 1, 1, 2, 3, 1], 3), ([9, 9, 9, 9, 9], 1)]
@pytest.mark.parametrize("A,expected", testdata)
def test_distinct(A, expected):
assert distinct.solution(A) == expected
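

# For reference, a minimal sketch of the solution under test (an assumption
# inferred from the test data: the Codility "Distinct" task counts the number
# of distinct values in A). The real implementation lives in distinct.py.
def _reference_solution(A):
    return len(set(A))

@pytest.mark.parametrize("A,expected", testdata)
def test_reference_solution(A, expected):
    assert _reference_solution(A) == expected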
| 21.7
| 58
| 0.64977
|
794e814b317313bb02a3f3b40e966b9c7d038606
| 701
|
py
|
Python
|
src/part_2_automation/data_driven/runner.py
|
AndreiHustiuc/IT_Factory_Course
|
c6f3e4a9282a1c19c0f52c79f0c81f026814a02a
|
[
"MIT"
] | null | null | null |
src/part_2_automation/data_driven/runner.py
|
AndreiHustiuc/IT_Factory_Course
|
c6f3e4a9282a1c19c0f52c79f0c81f026814a02a
|
[
"MIT"
] | null | null | null |
src/part_2_automation/data_driven/runner.py
|
AndreiHustiuc/IT_Factory_Course
|
c6f3e4a9282a1c19c0f52c79f0c81f026814a02a
|
[
"MIT"
] | 1
|
2022-03-16T10:39:03.000Z
|
2022-03-16T10:39:03.000Z
|
import os
from fuzzywuzzy import fuzz
from src.part_2_automation.data_driven.parser import get_input_data
from src.part_2_automation.data_driven.parser_csv_file import get_input_data_csv
def my_test_case(input_data, expected_data):
similarity = fuzz.partial_ratio(input_data, expected_data)
    if similarity > 90:
        print("passed")
    elif 30 < similarity <= 90:  # include 90 so the boundary is not silently "not pass"
        print('warning')
    else:
        print('not pass')
return similarity
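
# Expected shape of input_data.json (an assumption inferred from the keys
# accessed below; the actual file ships with the repository):
# {
#     "case_1": {"input": "some text", "expected": "some text"},
#     "case_2": {"input": "...",       "expected": "..."}
# }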
# print(os.getcwd())
data = get_input_data(os.getcwd() + '/input_data/input_data.json')
for key, value in data.items():
    # print(key, value)
    status = my_test_case(value['input'], value['expected'])
print(status)
| 21.242424
| 80
| 0.707561
|
794e81c002c7776a5673aa6ceac3b61ac83cf1b6
| 437
|
py
|
Python
|
frontend/views.py
|
hasadna/birdee
|
304a9e259d69e49535f4529288a01967c9c4c15a
|
[
"Apache-2.0"
] | 1
|
2017-10-29T05:56:00.000Z
|
2017-10-29T05:56:00.000Z
|
frontend/views.py
|
hasadna/birdee
|
304a9e259d69e49535f4529288a01967c9c4c15a
|
[
"Apache-2.0"
] | null | null | null |
frontend/views.py
|
hasadna/birdee
|
304a9e259d69e49535f4529288a01967c9c4c15a
|
[
"Apache-2.0"
] | null | null | null |
from account.forms import AccountCreationForm
from account.models import Account
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView, CreateView
class MainView(LoginRequiredMixin, TemplateView):
template_name = 'frontend/main.html'
class AccountRegisterView(CreateView):
model = Account
form_class = AccountCreationForm
template_name = 'registration/register.html'
| 29.133333
| 57
| 0.814645
|
794e8210f82e3d23df9543aa44d8b2195ab7e407
| 621
|
py
|
Python
|
homeassistant/components/nest/sensor.py
|
galihmelon/core
|
0c852b5f816c9b21f244b7acebfcc952ff29b937
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/nest/sensor.py
|
galihmelon/core
|
0c852b5f816c9b21f244b7acebfcc952ff29b937
|
[
"Apache-2.0"
] | 47
|
2020-07-15T06:41:53.000Z
|
2022-03-31T06:01:46.000Z
|
homeassistant/components/nest/sensor.py
|
tedsluis/home-assistant
|
cc776214772c51d0db808852306fa6762b9616e1
|
[
"Apache-2.0"
] | 1
|
2018-08-03T20:06:38.000Z
|
2018-08-03T20:06:38.000Z
|
"""Support for Nest sensors that dispatches between API versions."""
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from .const import DATA_SDM
from .sensor_legacy import async_setup_legacy_entry
from .sensor_sdm import async_setup_sdm_entry
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the sensors."""
    if DATA_SDM not in entry.data:
        await async_setup_legacy_entry(hass, entry, async_add_entities)
        return
    await async_setup_sdm_entry(hass, entry, async_add_entities)
| 34.5
| 71
| 0.800322
|
794e8220aef17a5a3d56ead8b62c7c81386c783d
| 5,587
|
py
|
Python
|
app/user/tests/test_user_api.py
|
dushdesh/django-recipe-api
|
6343720ec6270fa8dc927677a7af30431e4efbf8
|
[
"MIT"
] | null | null | null |
app/user/tests/test_user_api.py
|
dushdesh/django-recipe-api
|
6343720ec6270fa8dc927677a7af30431e4efbf8
|
[
"MIT"
] | null | null | null |
app/user/tests/test_user_api.py
|
dushdesh/django-recipe-api
|
6343720ec6270fa8dc927677a7af30431e4efbf8
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
USER_TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (public)"""
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
"""Test creating a valid user is successful"""
payload = {
'email': 'rinkidink@test.com',
'password': 'rinkidinkstinks',
'name': 'TestRink',
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_create_user_exists_failure(self):
"""Creating user that already exists fails"""
payload = {
'email': 'lorem@impsum.com',
'password': 'loremipsumdolor',
'name': 'Test'
}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test password must be more then 4 chars"""
payload = {
'email': 'lorem@impsum.com',
'password': 'lore',
'name': 'Test'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_user_token(self):
"""Test that a token is created for the user"""
payload = {
'email': 'lorem@impsum.com',
'password': 'loremipsum'
}
create_user(**payload)
res = self.client.post(USER_TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_creds(self):
"""Test token is not created with invalid creds"""
create_user(email='lorem@impsum.com', password='loremipsum')
payload = {
'email': 'lorem@impsum.com',
'password': 'wrongpassword'
}
res = self.client.post(USER_TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_with_user_not_created(self):
"""Token not created if user does not exists"""
payload = {
'email': 'lorem@impsum.com',
'password': 'loremipsum'
}
res = self.client.post(USER_TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field_password(self):
"""Test password is required to issue token"""
res = self.client.post(
USER_TOKEN_URL,
{
'email': 'some',
'password': ''
}
)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field_email(self):
"""Test email is required to issue token"""
res = self.client.post(
USER_TOKEN_URL,
{
'email': '',
'password': 'loremipsum',
}
)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauth(self):
"""Test that authentication is required for users"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserAPITests(TestCase):
"""Test API requests that require authentication"""
def setUp(self):
self.user = create_user(
email='gandalf@lotr.com',
password='youshallnotpass',
name='Gandalf the Grey'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""Test retrieving profile of logged in user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(
res.data,
{
'email': self.user.email,
'name': self.user.name,
}
)
def test_post_me_not_allowed(self):
"""Test POST is not allowed on ME url"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""Test updating the user profile"""
payload = {'name': 'Gandalf the white', 'password': 'reborn'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
| 31.744318
| 77
| 0.615894
|
794e826dae8d94e015f0fb63e01a4edae4104764
| 10,186
|
py
|
Python
|
elasticapm/transport/http.py
|
snappyflow/sftrace-python-agent
|
dc186b55aef86e7ec7d8d755c72b16c292eac9b3
|
[
"BSD-3-Clause"
] | null | null | null |
elasticapm/transport/http.py
|
snappyflow/sftrace-python-agent
|
dc186b55aef86e7ec7d8d755c72b16c292eac9b3
|
[
"BSD-3-Clause"
] | null | null | null |
elasticapm/transport/http.py
|
snappyflow/sftrace-python-agent
|
dc186b55aef86e7ec7d8d755c72b16c292eac9b3
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hashlib
import json
import re
import ssl
import urllib3
from urllib3.exceptions import MaxRetryError, TimeoutError
from elasticapm.transport.exceptions import TransportException
from elasticapm.transport.http_base import HTTPTransportBase
from elasticapm.utils import compat, json_encoder, read_pem_file
from elasticapm.utils.logging import get_logger
try:
import certifi
except ImportError:
certifi = None
logger = get_logger("elasticapm.transport.http")
class Transport(HTTPTransportBase):
def __init__(self, url: str, *args, **kwargs) -> None:
super(Transport, self).__init__(url, *args, **kwargs)
pool_kwargs = {"cert_reqs": "CERT_REQUIRED", "ca_certs": self.ca_certs, "block": True}
if url.startswith("https"):
if self._server_cert:
pool_kwargs.update(
{"assert_fingerprint": self.cert_fingerprint, "assert_hostname": False, "cert_reqs": ssl.CERT_NONE}
)
del pool_kwargs["ca_certs"]
elif not self._verify_server_cert:
pool_kwargs["cert_reqs"] = ssl.CERT_NONE
pool_kwargs["assert_hostname"] = False
self._pool_kwargs = pool_kwargs
self._http = None
self._url = url
def send(self, data):
response = None
headers = self._headers.copy() if self._headers else {}
headers.update(self.auth_headers)
if compat.PY2 and isinstance(self._url, compat.text_type):
url = self._url.encode("utf-8")
else:
url = self._url
try:
try:
response = self.http.urlopen(
"POST", url, body=data, headers=headers, timeout=self._timeout, preload_content=False
)
logger.debug("Sent request, url=%s size=%.2fkb status=%s", url, len(data) / 1024.0, response.status)
except Exception as e:
print_trace = True
if isinstance(e, MaxRetryError) and isinstance(e.reason, TimeoutError):
message = "Connection to APM Server timed out " "(url: %s, timeout: %s seconds)" % (
self._url,
self._timeout,
)
print_trace = False
else:
message = "Unable to reach APM Server: %s (url: %s)" % (e, self._url)
raise TransportException(message, data, print_trace=print_trace)
body = response.read()
if response.status >= 400:
if response.status == 429: # rate-limited
message = "Temporarily rate limited: "
print_trace = False
else:
message = "HTTP %s: " % response.status
print_trace = True
message += body.decode("utf8", errors="replace")[:10000]
raise TransportException(message, data, print_trace=print_trace)
return response.getheader("Location")
finally:
if response:
response.close()
@property
def http(self) -> urllib3.PoolManager:
if not self._http:
url_parts = compat.urlparse.urlparse(self._url)
proxies = compat.getproxies_environment()
proxy_url = proxies.get("https", proxies.get("http", None))
if proxy_url and not compat.proxy_bypass_environment(url_parts.netloc):
self._http = urllib3.ProxyManager(proxy_url, **self._pool_kwargs)
else:
self._http = urllib3.PoolManager(**self._pool_kwargs)
return self._http
def handle_fork(self) -> None:
# reset http pool to avoid sharing connections with the parent process
self._http = None
def get_config(self, current_version=None, keys=None):
"""
Gets configuration from a remote APM Server
:param current_version: version of the current configuration
:param keys: a JSON-serializable dict to identify this instance, e.g.
{
"service": {
"name": "foo",
"environment": "bar"
}
}
:return: a three-tuple of new version, config dictionary and validity in seconds.
Any element of the tuple can be None.
"""
url = self._config_url
data = json_encoder.dumps(keys).encode("utf-8")
headers = self._headers.copy()
headers[b"Content-Type"] = "application/json"
headers.pop(b"Content-Encoding", None) # remove gzip content-encoding header
headers.update(self.auth_headers)
max_age = 300
if current_version:
headers["If-None-Match"] = current_version
try:
response = self.http.urlopen(
"POST", url, body=data, headers=headers, timeout=self._timeout, preload_content=False
)
except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:
logger.debug("HTTP error while fetching remote config: %s", compat.text_type(e))
return current_version, None, max_age
body = response.read()
if "Cache-Control" in response.headers:
try:
max_age = int(next(re.finditer(r"max-age=(\d+)", response.headers["Cache-Control"])).groups()[0])
except StopIteration:
logger.debug("Could not parse Cache-Control header: %s", response.headers["Cache-Control"])
if response.status == 304:
# config is unchanged, return
logger.debug("Configuration unchanged")
return current_version, None, max_age
elif response.status >= 400:
return None, None, max_age
if not body:
logger.debug("APM Server answered with empty body and status code %s", response.status)
return current_version, None, max_age
body = body.decode("utf-8")
try:
data = json_encoder.loads(body)
return response.headers.get("Etag"), data, max_age
except json.JSONDecodeError:
logger.warning("Failed decoding APM Server response as JSON: %s", body)
return current_version, None, max_age
def _process_queue(self):
# if not self.client.server_version:
# self.fetch_server_info()
super()._process_queue()
def fetch_server_info(self):
headers = self._headers.copy() if self._headers else {}
headers.update(self.auth_headers)
headers["accept"] = "text/plain"
try:
response = self.http.urlopen("GET", self._server_info_url, headers=headers, timeout=self._timeout)
body = response.data
data = json_encoder.loads(body.decode("utf8"))
version = data["version"]
logger.info("Fetched APM Server version %s", version)
self.client.server_version = version_string_to_tuple(version)
except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:
logger.warning("HTTP error while fetching server information: %s", str(e))
except json.JSONDecodeError as e:
logger.warning("JSON decoding error while fetching server information: %s", str(e))
except (KeyError, TypeError):
logger.warning("No version key found in server response: %s", response.data)
@property
def cert_fingerprint(self):
if self._server_cert:
with open(self._server_cert, "rb") as f:
cert_data = read_pem_file(f)
digest = hashlib.sha256()
digest.update(cert_data)
return digest.hexdigest()
return None
@property
def auth_headers(self):
headers = super(Transport, self).auth_headers
return {k.encode("ascii"): v.encode("ascii") for k, v in compat.iteritems(headers)}
@property
def ca_certs(self):
"""
Return location of certificate store. If it is available and not disabled via setting,
this will return the location of the certifi certificate store.
"""
return certifi.where() if (certifi and self.client.config.use_certifi) else None
def version_string_to_tuple(version):
if version:
version_parts = re.split(r"[.\-]", version)
return tuple(int(p) if p.isdigit() else p for p in version_parts)
return ()
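
# Illustrative examples (not from the source):
#   version_string_to_tuple("7.15.0")     -> (7, 15, 0)
#   version_string_to_tuple("8.0.0-rc1")  -> (8, 0, 0, "rc1")
#   version_string_to_tuple(None)         -> ()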
# left for backwards compatibility
AsyncTransport = Transport
| 42.441667
| 119
| 0.628215
|
794e82c374da191c7df59fbd4fa68619964ac713
| 306
|
py
|
Python
|
Python/numOfAlpha.py
|
Mario263/Hacktoberfest_2021
|
57965f48d3b19d25d2c0b75525eab4c4dce0157a
|
[
"MIT"
] | 16
|
2021-10-15T08:41:52.000Z
|
2022-01-02T11:14:30.000Z
|
Python/numOfAlpha.py
|
Mario263/Hacktoberfest_2021
|
57965f48d3b19d25d2c0b75525eab4c4dce0157a
|
[
"MIT"
] | 5
|
2021-10-17T06:04:41.000Z
|
2021-10-30T16:45:40.000Z
|
Python/numOfAlpha.py
|
Mario263/Hacktoberfest_2021
|
57965f48d3b19d25d2c0b75525eab4c4dce0157a
|
[
"MIT"
] | 43
|
2021-10-15T14:03:48.000Z
|
2022-03-09T21:32:46.000Z
|
text = input("Enter text: ")
alpha,digit,space = 0,0,0
for x in text:
if x.isdigit():
digit+=1
elif x.isspace():
space+=1
elif x.isalpha():
alpha+=1
print('Number of Alphabets =',alpha)
print('Number of Digits =',digit)
print('Number of Spaces =',space)
| 20.4
| 37
| 0.565359
|
794e83666d92896681bb16e0eaecef54d57c2277
| 1,719
|
py
|
Python
|
official/vision/configs/video_classification_test.py
|
mcasanova1445/models
|
37be0fdb4abccca633bb3199a4e6f3f71cd174d9
|
[
"Apache-2.0"
] | 1
|
2022-03-04T02:08:52.000Z
|
2022-03-04T02:08:52.000Z
|
official/vision/configs/video_classification_test.py
|
mcasanova1445/models
|
37be0fdb4abccca633bb3199a4e6f3f71cd174d9
|
[
"Apache-2.0"
] | null | null | null |
official/vision/configs/video_classification_test.py
|
mcasanova1445/models
|
37be0fdb4abccca633bb3199a4e6f3f71cd174d9
|
[
"Apache-2.0"
] | 1
|
2022-03-21T13:47:02.000Z
|
2022-03-21T13:47:02.000Z
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for video_classification."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.vision.configs import video_classification as exp_cfg
class VideoClassificationConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(('video_classification',),
('video_classification_kinetics600',))
def test_video_classification_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.VideoClassificationTask)
self.assertIsInstance(config.task.model, exp_cfg.VideoClassificationModel)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.validate()
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 37.369565
| 78
| 0.774869
|
794e8428eb5a6001068a83715f4ad6acc4994e3f
| 105,171
|
py
|
Python
|
pysnmp-with-texts/SNANET-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/SNANET-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/SNANET-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module SNANET-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SNANET-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:08:02 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
iso, enterprises, Gauge32, TimeTicks, NotificationType, Bits, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, NotificationType, Counter32, IpAddress, ObjectIdentity, Integer32, Unsigned32, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "enterprises", "Gauge32", "TimeTicks", "NotificationType", "Bits", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "NotificationType", "Counter32", "IpAddress", "ObjectIdentity", "Integer32", "Unsigned32", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
unisys = MibIdentifier((1, 3, 6, 1, 4, 1, 223))
dcp = MibIdentifier((1, 3, 6, 1, 4, 1, 223, 8))
snanet = MibIdentifier((1, 3, 6, 1, 4, 1, 223, 8, 3))
prodInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 223, 8, 3, 1))
t5node = MibIdentifier((1, 3, 6, 1, 4, 1, 223, 8, 3, 2))
subarea = MibIdentifier((1, 3, 6, 1, 4, 1, 223, 8, 3, 3))
snaNau = MibIdentifier((1, 3, 6, 1, 4, 1, 223, 8, 3, 4))
snaSession = MibIdentifier((1, 3, 6, 1, 4, 1, 223, 8, 3, 5))
snaLink = MibIdentifier((1, 3, 6, 1, 4, 1, 223, 8, 3, 6))
prodInfoDesc = MibScalar((1, 3, 6, 1, 4, 1, 223, 8, 3, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: prodInfoDesc.setStatus('mandatory')
if mibBuilder.loadTexts: prodInfoDesc.setDescription('A textual description of the software release. Includes the release level and the internal revision level (example: SNA/net Release 5R2 Revision 5.2.10 Installed 06/03/94 07:10 ).')
prodInfoFeatures = MibScalar((1, 3, 6, 1, 4, 1, 223, 8, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: prodInfoFeatures.setStatus('mandatory')
if mibBuilder.loadTexts: prodInfoFeatures.setDescription('A value which indicates the features included in the software release. The value is a sum. This sum initially takes the value zero, then for each feature a value is assigned corresponding to a power of 2, such that a unique number is generated for each combination of features. Feature 1 Cross-Domain Resource Manager 2 Terminal Connect 4 PU T2.0 Inverted 8 Node Type 2.1 Low Entry Node (len) 16 Network Packet-Switching Interface (NPSI) 32 Remote Batch File Transfer Extended (RBFTE)')
t5nodeTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 1), )
if mibBuilder.loadTexts: t5nodeTable.setStatus('mandatory')
if mibBuilder.loadTexts: t5nodeTable.setDescription('This table contains information about the Type 5 node functionality (SSCP) provided by the SNA/net product. Multiple nodes can be provided by a single SNA/net product.')
t5nodeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 1, 1), ).setIndexNames((0, "SNANET-MIB", "t5nodeIndex"))
if mibBuilder.loadTexts: t5nodeEntry.setStatus('mandatory')
if mibBuilder.loadTexts: t5nodeEntry.setDescription('Entry contains information about the Type 5 node functionality (SSCP).')
t5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: t5nodeIndex.setDescription('SNA/net can appear as more than one t5node. Used to index instances of this object. Value is assigned by the agent and remains constant.')
t5nodeDomainName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5nodeDomainName.setStatus('mandatory')
if mibBuilder.loadTexts: t5nodeDomainName.setDescription('The configured name for this domain. This is also the t5nodeSscpName, if t5nodeSscpName is a 0 length string.')
t5nodeOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("up", 2), ("down", 3), ("standby", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5nodeOperState.setStatus('mandatory')
if mibBuilder.loadTexts: t5nodeOperState.setDescription('The operational state of the Type 5 Domain.')
t5nodeSubareaNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5nodeSubareaNumber.setStatus('mandatory')
if mibBuilder.loadTexts: t5nodeSubareaNumber.setDescription('The subarea number for this domain.')
t5nodeSscpName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 1, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5nodeSscpName.setStatus('mandatory')
if mibBuilder.loadTexts: t5nodeSscpName.setDescription('The name of the SSCP. Must be unique in the SNA subnetwork. It must match the name of the VTAM CDRM macro that defines this t5node as a CDRM to VTAM. May be a zero length string, in which case, the t5nodeDomainName is the name of the SSCP.')
t5nodeNetworkName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 1, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5nodeNetworkName.setStatus('mandatory')
if mibBuilder.loadTexts: t5nodeNetworkName.setDescription('The SNA network name.')
t5nodeSscpId = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5nodeSscpId.setStatus('mandatory')
if mibBuilder.loadTexts: t5nodeSscpId.setDescription('This value is used in ACTCDRM messages sent to VTAM.')
t5nodePuName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 1, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5nodePuName.setStatus('mandatory')
if mibBuilder.loadTexts: t5nodePuName.setDescription('The PU identifier of this node.')
t5CdrmTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 2), )
if mibBuilder.loadTexts: t5CdrmTable.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrmTable.setDescription('This table contains information about other Crossdomain Resource Managers known to this type 5 node.')
t5CdrmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 2, 1), ).setIndexNames((0, "SNANET-MIB", "t5CdrmT5nodeIndex"), (0, "SNANET-MIB", "t5CdrmName"))
if mibBuilder.loadTexts: t5CdrmEntry.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrmEntry.setDescription('The entry contains information about CDRMs. Variables have read-only access. The variable t5CdrmAdminState has read-write access and is used to control a CDRM.')
t5CdrmT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrmT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrmT5nodeIndex.setDescription('Index in the t5nodetable of the t5node this CDRM is associated with.')
t5CdrmName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrmName.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrmName.setDescription('The configured name of the CDRM.')
t5CdrmSnaName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrmSnaName.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrmSnaName.setDescription('The name of the CDRM as it is known in the SNA network. May be a zero length string, in which case, the t5CdrmName is the SNA name.')
t5CdrmType = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("external", 1), ("snanet", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrmType.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrmType.setDescription('Identifies the type of CDRM.')
t5CdrmAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("up", 2), ("down", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: t5CdrmAdminState.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrmAdminState.setDescription('Used by the Management Station to control the CDRM. Values up (2) and down (3) can be read or written, while value other (1) is read-only and indicates that this variable has not been set since reboot.')
t5CdrmOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("active", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrmOperState.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrmOperState.setDescription('The current operational state of the CDRM in relation to this Type 5 node SSCP.')
t5CdrmSubareaNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 2, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrmSubareaNumber.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrmSubareaNumber.setDescription('The subarea number for the CDRM.')
t5CdrmNetworkName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 2, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrmNetworkName.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrmNetworkName.setDescription('The SNA network name for the CDRM.')
t5CdrmElementAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 2, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrmElementAddress.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrmElementAddress.setDescription('Element Address assigned to this CDRM. ')
t5CdrscTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 3), )
if mibBuilder.loadTexts: t5CdrscTable.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrscTable.setDescription('This table contains information about other Crossdomain Resources known to this type 5 node.')
t5CdrscEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 3, 1), ).setIndexNames((0, "SNANET-MIB", "t5CdrscT5nodeIndex"), (0, "SNANET-MIB", "t5CdrscName"))
if mibBuilder.loadTexts: t5CdrscEntry.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrscEntry.setDescription('The entry contains information about a CDRSC. Variables have read-only access. The variable t5CdrscAdminState has read-write access and is used to control a CDRSC.')
t5CdrscT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrscT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrscT5nodeIndex.setDescription('Index in the t5nodetable of the t5node this CDRSC is associated with.')
t5CdrscName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 3, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrscName.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrscName.setDescription('The configured name of the CDRSC.')
t5CdrscSnaName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 3, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrscSnaName.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrscSnaName.setDescription('The name of the CDRSC as it is known in the SNA Network. The SnaName is the same a the t5CdrscName when this value is a zero length string.')
t5CdrscCdrmName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 3, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrscCdrmName.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrscCdrmName.setDescription('The name the CDRM which owns this CDRSC.')
t5CdrscAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("up", 2), ("down", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: t5CdrscAdminState.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrscAdminState.setDescription('Used by the Management Station to control the use of the CDRSC. Values up (2) and down (3) may be read or written, while the value other(1) read-only and indicates this variable has not been set since the last reboot.')
t5CdrscOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("active", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrscOperState.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrscOperState.setDescription('The current operational state of the CDRSC in relation to this Type 5 node SSCP.')
t5CdrscSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 3, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrscSessions.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrscSessions.setDescription('The number of active SNA LU-LU sessions for this CDRSC.')
t5CdrscDlmName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 3, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrscDlmName.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrscDlmName.setDescription('The name of the Default Logon mode table used by this CDRSC.')
t5CdrscCosName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 3, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CdrscCosName.setStatus('mandatory')
if mibBuilder.loadTexts: t5CdrscCosName.setDescription('The name of the Class of Service table used by this CDRSC.')
t5DlmTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4), )
if mibBuilder.loadTexts: t5DlmTable.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmTable.setDescription('This table contains information about Default Logon Mode Tables used by the SSCP for LU-LU session establishment.')
t5DlmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1), ).setIndexNames((0, "SNANET-MIB", "t5DlmT5nodeIndex"), (0, "SNANET-MIB", "t5DlmName"))
if mibBuilder.loadTexts: t5DlmEntry.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmEntry.setDescription('The entry contains a Default Logon Mode Table entry. All variables are read-only.')
t5DlmT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmT5nodeIndex.setDescription('Index in the t5nodetable of the t5node this DLM is associated with.')
t5DlmName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmName.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmName.setDescription('The configured name of the Default Logon Mode.')
t5DlmSnaName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmSnaName.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmSnaName.setDescription('The name of the Default Logon Mode as it is known in the SNA network. May contain a zero length string, in which case, the t5DlmName is the SNA name.')
t5DlmFmprof = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmFmprof.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmFmprof.setDescription('The Function Management Profile type, a value from 0 to 18 hexadecimal(byte 1 in the session parameter field).')
t5DlmTsprof = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmTsprof.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmTsprof.setDescription('The Transmission Services profile type, a value from 1 to 17 hexadecimal(byte 2 in the session parameter field).')
t5DlmPriprot = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmPriprot.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmPriprot.setDescription('The primary logical unit protocol, a value from 0 to 0FF hexadecimal(byte 3 in the session parameter field).')
t5DlmSecprot = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmSecprot.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmSecprot.setDescription('The secondary logical unit protocol, a value from 0 to 0FF hexadecimal(byte 4 in the session parameter field).')
t5DlmComprot = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmComprot.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmComprot.setDescription('The common logical unit protocol, a value from 0 to 0FFFF hexadecimal(byte 5 and 6 in the session parameter field).')
t5DlmRusizes = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmRusizes.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmRusizes.setDescription('The maximum length of data request units in bytes. A four digit hexadecimal value is provided in the same format as for the ACF/VTAM generation(bytes 9 and 10 in the session parameter field.')
t5DlmPservic = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(12, 12)).setFixedLength(12)).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmPservic.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmPservic.setDescription('The logical unit presentation services profile and usage field (bytes 13 through 24 in the session parameter field). A 24-digit hexadecimal number.')
t5DlmPsndpac = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 11), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmPsndpac.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmPsndpac.setDescription('The primary send pacing count (byte 11 in the session parameter field), a value from 0 to 63 decimal.')
t5DlmPrcvpac = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 12), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmPrcvpac.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmPrcvpac.setDescription('The primary receive pacing count (byte 12 in the session parameter field), a value from 0 to 63 decimal.')
t5DlmSsndpac = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 13), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmSsndpac.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmSsndpac.setDescription('The secondary send pacing count (byte 7 in the session parameter field), a value from 0 to 63 decimal.')
t5DlmSrcvpac = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 14), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmSrcvpac.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmSrcvpac.setDescription('The secondary receive pacing count (byte 8 in the session parameter field), a value from 0 to 63 decimal.')
t5DlmEncr = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmEncr.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmEncr.setDescription('The encryption/decryption type expected by the logical unit (first four bits of byte 26 in the session parameter field), a value from 0 to F hexadecimal.')
t5DlmBindType = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmBindType.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmBindType.setDescription('The BIND type, a value of 0 or 1: 0 means a negotiable BIND; 1 means a non-negotiable BIND.')
t5DlmCos = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 4, 1, 17), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5DlmCos.setStatus('mandatory')
if mibBuilder.loadTexts: t5DlmCos.setDescription('The name of the class of service to be used for a session that uses this logon mode.')
t5CosTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 5), )
if mibBuilder.loadTexts: t5CosTable.setStatus('mandatory')
if mibBuilder.loadTexts: t5CosTable.setDescription('This table contains class of service (COS) entries.')
t5CosEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 5, 1), ).setIndexNames((0, "SNANET-MIB", "t5CosT5nodeIndex"), (0, "SNANET-MIB", "t5CosName"))
if mibBuilder.loadTexts: t5CosEntry.setStatus('mandatory')
if mibBuilder.loadTexts: t5CosEntry.setDescription('The entry describes one class of service. All variables are read-only.')
t5CosT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 5, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CosT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: t5CosT5nodeIndex.setDescription('Index in the t5nodetable of the t5node this COS is associated with.')
t5CosName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 5, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CosName.setStatus('mandatory')
if mibBuilder.loadTexts: t5CosName.setDescription('The configured name of the class of service entry.')
t5CosSnaName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 5, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CosSnaName.setStatus('mandatory')
if mibBuilder.loadTexts: t5CosSnaName.setDescription('The name of the class of service entry as it is known in the SNA Network. May be a zero-length string, in which case the t5CosName is the SNA name.')
t5CosVrids = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 5, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 48))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5CosVrids.setStatus('mandatory')
if mibBuilder.loadTexts: t5CosVrids.setDescription('A list of virtual routes, in hierarchical order, that are used for LU-LU session traffic. The list is octet pairs in the form x,y where x is the virtual route number from 0 to 7, and y is the transmission priority from 0 to 3. Up to 24 ordered pairs are possible (vr1,tp1 vr2,tp2 ...vr24,tp24).')
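# Illustrative helper, not part of the generated MIB: t5CosVrids packs its
# virtual-route list as consecutive octet pairs (vr, tp) in hierarchical
# order, as described above. A minimal sketch decoding such a value into
# ordered (vr, tp) tuples, assuming the raw pairwise layout:
def decode_t5CosVrids(octets):
    """Decode a t5CosVrids value into an ordered list of (vr, tp) pairs."""
    raw = bytes(octets)
    # Each pair is one octet of virtual route number followed by one octet
    # of transmission priority.
    return [(raw[i], raw[i + 1]) for i in range(0, len(raw) - 1, 2)]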
t5AliasTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 6), )
if mibBuilder.loadTexts: t5AliasTable.setStatus('mandatory')
if mibBuilder.loadTexts: t5AliasTable.setDescription('This table is a cross-reference from alias names to SNA/net resource names. The current use of this table is for alternate logon names for Crossdomain resources.')
t5AliasEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 6, 1), ).setIndexNames((0, "SNANET-MIB", "t5AliasT5nodeIndex"), (0, "SNANET-MIB", "t5AliasName"))
if mibBuilder.loadTexts: t5AliasEntry.setStatus('mandatory')
if mibBuilder.loadTexts: t5AliasEntry.setDescription('The entry contains the alias name and a reference to the SNA (CDRSC) resource it refers to. All variables are read-only.')
t5AliasT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 6, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5AliasT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: t5AliasT5nodeIndex.setDescription('Index in the t5nodetable of the t5node this Alias Name is associated with.')
t5AliasName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 6, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5AliasName.setStatus('mandatory')
if mibBuilder.loadTexts: t5AliasName.setDescription('The configured alias name.')
t5AliasResourceId = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 2, 6, 1, 3), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t5AliasResourceId.setStatus('mandatory')
if mibBuilder.loadTexts: t5AliasResourceId.setDescription('A reference to the SNA resource (CDRSC) identified by this Alias Name. It is the object identifier representing the instance of the index variable of the resource.')
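# Illustrative helper, not part of the generated MIB: t5AliasResourceId
# (like the other pointer objects in this module) carries the instance OID
# of another row's index variable. A minimal sketch that dereferences such
# a pointer with a plain GET; the host address and community string are
# placeholder assumptions.
def fetch_pointer_target(pointer_oid, host='192.0.2.1', community='public'):
    # Import locally so this MIB module stays loadable without pysnmp.hlapi.
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, getCmd)
    errInd, errStat, errIdx, varBinds = next(getCmd(
        SnmpEngine(), CommunityData(community),
        UdpTransportTarget((host, 161)), ContextData(),
        ObjectType(ObjectIdentity(pointer_oid))))
    if errInd or errStat:
        raise RuntimeError(errInd or errStat.prettyPrint())
    return varBinds[0]  # the (OID, value) pair of the referenced instance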
saErTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 1), )
if mibBuilder.loadTexts: saErTable.setStatus('mandatory')
if mibBuilder.loadTexts: saErTable.setDescription('This table contains information about explicit routes between this subarea and other network subareas.')
saErEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 1, 1), ).setIndexNames((0, "SNANET-MIB", "saErT5nodeIndex"), (0, "SNANET-MIB", "saErDestinationSubarea"), (0, "SNANET-MIB", "saErNumber"))
if mibBuilder.loadTexts: saErEntry.setStatus('mandatory')
if mibBuilder.loadTexts: saErEntry.setDescription('This entry contains information about an explicit route.')
saErT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saErT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: saErT5nodeIndex.setDescription('Index in the t5nodetable of the t5node this explicit route is associated with.')
saErDestinationSubarea = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saErDestinationSubarea.setStatus('mandatory')
if mibBuilder.loadTexts: saErDestinationSubarea.setDescription('The destination subarea number for this Explicit Route.')
saErNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saErNumber.setStatus('mandatory')
if mibBuilder.loadTexts: saErNumber.setDescription('The number of the Explicit Route.')
saErTgNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saErTgNumber.setStatus('mandatory')
if mibBuilder.loadTexts: saErTgNumber.setDescription('The Transmission Group number associated with this Explicit Route.')
saErOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("other", 1), ("inoperative", 2), ("operative", 3), ("actPend", 4), ("innActPend", 5), ("innAct", 6), ("actNoSend", 7), ("active", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saErOperState.setStatus('mandatory')
if mibBuilder.loadTexts: saErOperState.setDescription('The current operational state of the Explicit Route.')
saVrTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 2), )
if mibBuilder.loadTexts: saVrTable.setStatus('mandatory')
if mibBuilder.loadTexts: saVrTable.setDescription('This table contains information about virtual routes between this subarea and other network subareas. Virtual routes are logical connections between subareas and are mapped to explicit routes.')
saVrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 2, 1), ).setIndexNames((0, "SNANET-MIB", "saVrT5nodeIndex"), (0, "SNANET-MIB", "saVrErNumber"), (0, "SNANET-MIB", "saVrNumber"), (0, "SNANET-MIB", "saVrTransmissionPriority"))
if mibBuilder.loadTexts: saVrEntry.setStatus('mandatory')
if mibBuilder.loadTexts: saVrEntry.setDescription('This entry contains information about a virtual route. All variables are read-only.')
saVrT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saVrT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: saVrT5nodeIndex.setDescription('Index in the t5nodetable of the t5node this virtual route is associated with.')
saVrErNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saVrErNumber.setStatus('mandatory')
if mibBuilder.loadTexts: saVrErNumber.setDescription('The explicit route to which this virtual route belongs.')
saVrNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saVrNumber.setStatus('mandatory')
if mibBuilder.loadTexts: saVrNumber.setDescription('The number of the Virtual Route.')
saVrTransmissionPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saVrTransmissionPriority.setStatus('mandatory')
if mibBuilder.loadTexts: saVrTransmissionPriority.setDescription('The transmission priority for this virtual route.')
saVrWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saVrWindowSize.setStatus('mandatory')
if mibBuilder.loadTexts: saVrWindowSize.setDescription('The initial window size for this virtual route.')
saVrMinWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saVrMinWindowSize.setStatus('mandatory')
if mibBuilder.loadTexts: saVrMinWindowSize.setDescription('The minimum window size for this virtual route.')
saVrMaxWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 2, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saVrMaxWindowSize.setStatus('mandatory')
if mibBuilder.loadTexts: saVrMaxWindowSize.setDescription('The maximum window size for this virtual route.')
saVrPacingCount = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 2, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saVrPacingCount.setStatus('mandatory')
if mibBuilder.loadTexts: saVrPacingCount.setDescription('The pacing count for this virtual route.')
saTransmissionGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3))
saTgTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 1), )
if mibBuilder.loadTexts: saTgTable.setStatus('mandatory')
if mibBuilder.loadTexts: saTgTable.setDescription('This table contains information about transmission groups between this subarea and other network subareas. Entries are dynamically added to this table as they are learned from TG link activations.')
saTgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 1, 1), ).setIndexNames((0, "SNANET-MIB", "saTgT5nodeIndex"), (0, "SNANET-MIB", "saTgNumber"), (0, "SNANET-MIB", "saTgAdjacentSubarea"))
if mibBuilder.loadTexts: saTgEntry.setStatus('mandatory')
if mibBuilder.loadTexts: saTgEntry.setDescription('This entry contains information about a transmission group.')
saTgT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: saTgT5nodeIndex.setDescription('Index in the t5nodetable of the t5node this transmission group is associated with.')
saTgNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgNumber.setStatus('mandatory')
if mibBuilder.loadTexts: saTgNumber.setDescription('The number of this transmission group.')
saTgAdjacentSubarea = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgAdjacentSubarea.setStatus('mandatory')
if mibBuilder.loadTexts: saTgAdjacentSubarea.setDescription('The adjacent subarea number with which this transmission group is associated.')
saTgOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("inactive", 1), ("active", 2), ("pendActive", 3), ("pendInactive", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgOperState.setStatus('mandatory')
if mibBuilder.loadTexts: saTgOperState.setDescription('The current operational state of this transmission group.')
saTgMaxSendPiuSize = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgMaxSendPiuSize.setStatus('mandatory')
if mibBuilder.loadTexts: saTgMaxSendPiuSize.setDescription('The maximum Path Information Unit size in bytes that may be sent on this Transmission Group.')
saTgMaxReceivePiuSize = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgMaxReceivePiuSize.setStatus('mandatory')
if mibBuilder.loadTexts: saTgMaxReceivePiuSize.setDescription('The maximum Path Information Unit size in bytes that may be received on this Transmission Group.')
saTgActiveTime = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 1, 1, 7), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgActiveTime.setStatus('mandatory')
if mibBuilder.loadTexts: saTgActiveTime.setDescription('The value of sysUpTime when this transmission group became active.')
saTgLastStateChange = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 1, 1, 8), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgLastStateChange.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLastStateChange.setDescription('The value of sysUpTime when the last state change occurred.')
saTgSentBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgSentBytes.setStatus('mandatory')
if mibBuilder.loadTexts: saTgSentBytes.setDescription('The number of bytes sent on this transmission group.')
saTgReceivedBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgReceivedBytes.setStatus('mandatory')
if mibBuilder.loadTexts: saTgReceivedBytes.setDescription('The number of bytes received on this transmission group.')
saTgSentBtus = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgSentBtus.setStatus('mandatory')
if mibBuilder.loadTexts: saTgSentBtus.setDescription('The number of Basic Transmission Units sent on this transmission group.')
saTgReceivedBtus = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgReceivedBtus.setStatus('mandatory')
if mibBuilder.loadTexts: saTgReceivedBtus.setDescription('The number of Basic Transmission Units received on this transmission group.')
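# Illustrative helper, not part of the generated MIB: the saTg* traffic
# counters above are Counter32 values, so rate calculations across polls
# must allow for wrap at 2**32. A minimal sketch computing a per-second
# rate from two samples of, e.g., saTgSentBytes:
def counter32_rate(prev, curr, interval_seconds):
    """Per-second rate between two Counter32 samples, tolerating one wrap."""
    delta = (int(curr) - int(prev)) % (2 ** 32)
    return delta / float(interval_seconds)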
saTgLinkTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2), )
if mibBuilder.loadTexts: saTgLinkTable.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkTable.setDescription('This table contains an entry for each link which may be used for a transmission group. The links are not associated with a transmission group until saTgLinkOperState is active (2).')
saTgLinkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1), ).setIndexNames((0, "SNANET-MIB", "saTgLinkT5nodeIndex"), (0, "SNANET-MIB", "saTgLinkIndex"))
if mibBuilder.loadTexts: saTgLinkEntry.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkEntry.setDescription('Contains configuration and state information about TG links. Variables have read-only access. The variable saTgLinkAdminState has read-write access and is used to control a TG link.')
saTgLinkT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgLinkT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkT5nodeIndex.setDescription('Identifies the Type 5 node with which this transmission group link is associated.')
saTgLinkIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgLinkIndex.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkIndex.setDescription('Unique index of this transmission group link. The value is assigned by the agent and is unique among TG and PU links.')
saTgLinkTgNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgLinkTgNumber.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkTgNumber.setDescription('The transmission group number to which this link is associated. Contains a value of zero (0) when saTgLinkOperState is inactive(1).')
saTgLinkAdjacentSubarea = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgLinkAdjacentSubarea.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkAdjacentSubarea.setDescription('The adjacent subarea number with which this transmission group is associated. Contains a value of zero when saTgLinkOperState is inactive(1).')
saTgLinkName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgLinkName.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkName.setDescription('The administratively assigned name of the transmission group link.')
saTgLinkOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("inactive", 1), ("active", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgLinkOperState.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkOperState.setDescription('The current operational state of this transmission group link.')
saTgLinkAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("up", 2), ("down", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: saTgLinkAdminState.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkAdminState.setDescription('Used by the Management Station to control the transmission group Link. Values up (2) and down (3) can be read or written, while value other (1) is read-only and indicates that this variable has not been set since reboot.')
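# Illustrative helper, not part of the generated MIB: saTgLinkAdminState is
# the one read-write object in this table and drives a TG link up (2) or
# down (3). A minimal sketch issuing that SET with pysnmp; it assumes this
# compiled SNANET-MIB module is on the pysnmp MIB search path, and the host
# address and community string are placeholder assumptions.
def set_tg_link_admin_state(t5node_index, link_index, state,
                            host='192.0.2.1', community='private'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity,
                              Integer32, setCmd)
    errInd, errStat, errIdx, varBinds = next(setCmd(
        SnmpEngine(), CommunityData(community),
        UdpTransportTarget((host, 161)), ContextData(),
        ObjectType(ObjectIdentity('SNANET-MIB', 'saTgLinkAdminState',
                                  t5node_index, link_index),
                   Integer32(state))))
    if errInd or errStat:
        raise RuntimeError(errInd or errStat.prettyPrint())
# Example: set_tg_link_admin_state(1, 5, 2) would request that link 5 on
# t5node 1 be brought up.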
saTgLinkType = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("internal", 1), ("link8022", 2), ("qllc", 3), ("sdlc", 4), ("channel", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgLinkType.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkType.setDescription('The type of transmission group link.')
saTgLinkSpecific = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1, 9), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgLinkSpecific.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkSpecific.setDescription('A row in a table specific for the link. It is the object identifier representing the instance of the index variable in one of the snaLink tables from this MIB.')
saTgLinkActiveTime = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1, 10), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgLinkActiveTime.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkActiveTime.setDescription('The value of sysUpTime when this TG link became active.')
saTgLinkLastStateChange = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1, 11), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgLinkLastStateChange.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkLastStateChange.setDescription('The value of sysUptime when the last state change occurred.')
saTgLinkSentBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgLinkSentBytes.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkSentBytes.setDescription('The number of bytes sent on this TG link.')
saTgLinkReceivedBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgLinkReceivedBytes.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkReceivedBytes.setDescription('The number of bytes received on this TG link.')
saTgLinkSentBtus = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgLinkSentBtus.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkSentBtus.setDescription('The number of Basic Transmission Units sent on this TG link.')
saTgLinkReceivedBtus = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 3, 3, 2, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: saTgLinkReceivedBtus.setStatus('mandatory')
if mibBuilder.loadTexts: saTgLinkReceivedBtus.setDescription('The number of Basic Transmission Units received on this TG link.')
t2node = MibIdentifier((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1))
snaLu = MibIdentifier((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2))
applicationLu = MibIdentifier((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3))
t2nodeTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1), )
if mibBuilder.loadTexts: t2nodeTable.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeTable.setDescription('This table contains all configured and dynamic parameters of type 2 nodes; these have read-only access. There is also one control parameter, t2nodeAdminState, which has read-write access. The link specific parameters are contained in a row of a separate table referenced by the snaLinkSpecific object.')
t2nodeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1, 1), ).setIndexNames((0, "SNANET-MIB", "t2nodeT5nodeIndex"), (0, "SNANET-MIB", "t2nodeIndex"))
if mibBuilder.loadTexts: t2nodeEntry.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeEntry.setDescription('Entry contains all parameters of one type 2 node. They have read-only access. The entry is created by the Agent. The variable t2nodeAdminState has read-write access and is used to start or stop the node.')
t2nodeT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeT5nodeIndex.setDescription('Index in the t5nodeTable to which this t2node is associated.')
t2nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeIndex.setDescription('Used to index the instances of objects assigned by the Agent.')
t2nodeName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeName.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeName.setDescription('The value identifies the name of the Node.')
t2nodeType = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("pu20prim", 2), ("pu20sec", 3), ("t21LEN", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeType.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeType.setDescription('This value identifies the type of Node.')
t2nodeBlockNum = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(3, 3)).setFixedLength(3)).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeBlockNum.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeBlockNum.setDescription('The value identifies the block number for this Node instance. It is the first 3 hexadecimal digits of the t2node id.')
t2nodeIdNum = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(5, 5)).setFixedLength(5)).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeIdNum.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeIdNum.setDescription('The value identifies the ID number for this Node instance. This is the last 5 hexadecimal digits of the t2node id.')
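# Illustrative helper, not part of the generated MIB: per the descriptions
# above, the full 8-digit t2node id is t2nodeBlockNum (first 3 hexadecimal
# digits) followed by t2nodeIdNum (last 5 hexadecimal digits). A minimal
# sketch joining the two, assuming the agent carries both values as ASCII
# hexadecimal digits:
def t2node_id(block_num, id_num):
    """Join t2nodeBlockNum and t2nodeIdNum into the 8-digit node id."""
    return (bytes(block_num) + bytes(id_num)).decode('ascii').upper()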
t2nodeMaxPiu = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeMaxPiu.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeMaxPiu.setDescription('Maximum number of octets that can be exchanged by this PU in one Path Information Unit (PIU).')
t2nodeLinkType = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("internal", 1), ("link8022", 2), ("qllc", 3), ("sdlc", 4), ("channelAttach", 5), ("tcpip", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeLinkType.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeLinkType.setDescription('The type of link protocol used for this PU.')
t2nodeLinkSpecific = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1, 1, 9), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeLinkSpecific.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeLinkSpecific.setDescription('The row in a table specific for the link. It is the object identifier representing the instance of the index variable in one of the snaLink tables from this MIB.')
t2nodeOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3), ("active", 4), ("busy", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeOperState.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeOperState.setDescription('The operational state of the type 2 Node.')
t2nodeAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("start", 2), ("stop", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: t2nodeAdminState.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeAdminState.setDescription('The administrative state of the type 2 Node, used by the Management Station to start or stop the Node. The values that can be read and written are: start (2), which is used to start the Node, and stop (3), which is used to stop it. The value that can only be read is other (1), which indicates that the variable has not been set since reboot.')
t2nodeStartTime = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1, 1, 12), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeStartTime.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeStartTime.setDescription('The value of sysUpTime at type 2 Node activation.')
t2nodeLastStateChange = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1, 1, 13), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeLastStateChange.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeLastStateChange.setDescription('The value of sysUpTime at the last state change of the type 2 Node.')
t2nodeActFailureReason = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeActFailureReason.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeActFailureReason.setDescription('The sense code for the activation failure. It will be sent in the trap t2NodeActFailTrap.')
t2nodeStatsTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 2), )
if mibBuilder.loadTexts: t2nodeStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeStatsTable.setDescription('This table contains the dynamic parameters which have read-only access. The entries in this table augment the entries in the t2nodeTable and cannot be created by the Management Station.')
t2nodeStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 2, 1), ).setIndexNames((0, "SNANET-MIB", "t2nodeStatsT5nodeIndex"), (0, "SNANET-MIB", "t2nodeStatsIndex"))
if mibBuilder.loadTexts: t2nodeStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeStatsEntry.setDescription('The entry contains parameters which describe the statistics of one t2node. They have read-only access. The counters represent traffic for all kinds of sessions: LU-LU, SSCP-PU, and SSCP-LU. The entry is created by the Agent.')
t2nodeStatsT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeStatsT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeStatsT5nodeIndex.setDescription('Index in the t5nodeTable to which this t2node is associated.')
t2nodeStatsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeStatsIndex.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeStatsIndex.setDescription('The instance of the entry parameters. The index value is taken by the Agent from t2nodeIndex.')
t2nodeStatsSentBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeStatsSentBytes.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeStatsSentBytes.setDescription('The number of bytes sent by this Node.')
t2nodeStatsReceivedBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeStatsReceivedBytes.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeStatsReceivedBytes.setDescription('The number of bytes received by this Node.')
t2nodeStatsSentPius = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeStatsSentPius.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeStatsSentPius.setDescription('The number of PIUs sent by this Node.')
t2nodeStatsReceivedPius = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeStatsReceivedPius.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeStatsReceivedPius.setDescription('The number of PIUs received by this Node.')
t2nodeStatsSentNegativeResps = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeStatsSentNegativeResps.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeStatsSentNegativeResps.setDescription('The number of negative responses sent by this Node.')
t2nodeStatsReceivedNegativeResps = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeStatsReceivedNegativeResps.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeStatsReceivedNegativeResps.setDescription('The number of negative responses received by this Node.')
t2nodeStatsActiveLus = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 2, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeStatsActiveLus.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeStatsActiveLus.setDescription('The number of LUs currently active on this PU.')
t2nodeStatsActLus = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 2, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeStatsActLus.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeStatsActLus.setDescription('The number of LUs on this type 2 Node which have active SSCP-LU sessions.')
t2nodeStatsInActLus = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 2, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeStatsInActLus.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeStatsInActLus.setDescription('The number of LUs on this type 2 Node which do not have active SSCP-LU sessions.')
t2nodeStatsBindLus = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 1, 2, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: t2nodeStatsBindLus.setStatus('mandatory')
if mibBuilder.loadTexts: t2nodeStatsBindLus.setDescription('The number of LUs on this type 2 Node which have received and acknowledged a BIND request.')
snaLuTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2, 1), )
if mibBuilder.loadTexts: snaLuTable.setStatus('mandatory')
if mibBuilder.loadTexts: snaLuTable.setDescription('This table contains configuration and state information relating to LUs.')
snaLuEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2, 1, 1), ).setIndexNames((0, "SNANET-MIB", "snaLuT5nodeIndex"), (0, "SNANET-MIB", "snaLuT2nodeIndex"), (0, "SNANET-MIB", "snaLuIndex"))
if mibBuilder.loadTexts: snaLuEntry.setStatus('mandatory')
if mibBuilder.loadTexts: snaLuEntry.setDescription('Contains configuration and state objects relating to an LU. All have read-only access with the exception of snaLuAdminState which has read-write access and is used by the Management station to control the state of the LU.')
snaLuT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLuT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLuT5nodeIndex.setDescription('Index in the t5nodeTable to which this LU is associated.')
snaLuT2nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLuT2nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLuT2nodeIndex.setDescription('Index in the t2nodeTable of the type 2 Node with which this LU is associated.')
snaLuIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLuIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLuIndex.setDescription('This value identifies a unique index for an LU instance within a type 2 Node.')
snaLuName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2, 1, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLuName.setStatus('mandatory')
if mibBuilder.loadTexts: snaLuName.setDescription('The SNA name for this LU.')
snaLuType = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("other", 1), ("lu0", 2), ("lu1", 3), ("lu2", 4), ("lu3", 5), ("lu62", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLuType.setStatus('mandatory')
if mibBuilder.loadTexts: snaLuType.setDescription('Identifies whether the LU is Type 0, 1, 2, 3, or 6.2.')
snaLuLocalAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2, 1, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLuLocalAddress.setStatus('mandatory')
if mibBuilder.loadTexts: snaLuLocalAddress.setDescription('The local address for this LU. It is a one-byte value from 0 to 255. For dependent LUs this value ranges from 1 to 255. For independent LUs this value is always 0.')
snaLuUserName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2, 1, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLuUserName.setStatus('mandatory')
if mibBuilder.loadTexts: snaLuUserName.setDescription('The name of the non-SNA terminal or SNA LU that is using this LU. This value does not apply to all LUs and contains a zero-length string if it is not known or does not apply.')
snaLuPoolName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2, 1, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLuPoolName.setStatus('mandatory')
if mibBuilder.loadTexts: snaLuPoolName.setDescription('The name of the LU pool to which this LU belongs. An LU Pool is configured by the user. It contains a group of LUs, any of which can be assigned to a user requesting an LU by the Pool name. May be a zero-length string if this LU does not belong to a pool.')
snaLuOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("inactive", 1), ("active", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLuOperState.setStatus('mandatory')
if mibBuilder.loadTexts: snaLuOperState.setDescription('The current operational state of this LU: For dependent LUs active(2) indicates the SSCP-LU session is active. For Independent LUs active(2) indicates that the LU is able to send or receive BINDs.')
snaLuAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("up", 2), ("down", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snaLuAdminState.setStatus('mandatory')
if mibBuilder.loadTexts: snaLuAdminState.setDescription('This object is used by a management station to control the LU. Values up(2) and down(3) can be read or written, while a value other(1) is read-only and indicates that this variable has not been set since reboot.')
snaLuLastStateChange = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2, 1, 1, 11), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLuLastStateChange.setStatus('mandatory')
if mibBuilder.loadTexts: snaLuLastStateChange.setDescription('The value of sysUpTime when the last state change occurred.')
snaLuActiveTime = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2, 1, 1, 12), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLuActiveTime.setStatus('mandatory')
if mibBuilder.loadTexts: snaLuActiveTime.setDescription('The value of sysUpTime when this LU becomes active.')
snaLuBindFailureReason = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 2, 1, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLuBindFailureReason.setStatus('mandatory')
if mibBuilder.loadTexts: snaLuBindFailureReason.setDescription('The sense code when there is a bind failure.')
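# Illustrative helper, not part of the generated MIB: a minimal sketch that
# walks snaLuTable and yields each LU's name and operational state. It
# assumes this compiled SNANET-MIB module is on the pysnmp MIB search path;
# the host address and community string are placeholder assumptions.
def walk_sna_lus(host='192.0.2.1', community='public'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    for errInd, errStat, errIdx, varBinds in nextCmd(
            SnmpEngine(), CommunityData(community),
            UdpTransportTarget((host, 161)), ContextData(),
            ObjectType(ObjectIdentity('SNANET-MIB', 'snaLuName')),
            ObjectType(ObjectIdentity('SNANET-MIB', 'snaLuOperState')),
            lexicographicMode=False):
        if errInd or errStat:
            raise RuntimeError(errInd or errStat.prettyPrint())
        name, state = varBinds
        yield str(name[1]), int(state[1])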
applicationLuTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 1), )
if mibBuilder.loadTexts: applicationLuTable.setStatus('mandatory')
if mibBuilder.loadTexts: applicationLuTable.setDescription('Table of application LU configuration and operational information.')
appLuEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 1, 1), ).setIndexNames((0, "SNANET-MIB", "appLuT5nodeIndex"), (0, "SNANET-MIB", "appLuIndex"))
if mibBuilder.loadTexts: appLuEntry.setStatus('mandatory')
if mibBuilder.loadTexts: appLuEntry.setDescription('Contains configuration and operational variables of an application LU. Variables are read-only with the exception of appLuAdminState which has read-write access and is used by the Management station to control the state of the Application LU.')
appLuT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: appLuT5nodeIndex.setDescription('Index in the t5nodeTable to which this application LU is associated.')
appLuIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuIndex.setStatus('mandatory')
if mibBuilder.loadTexts: appLuIndex.setDescription('Unique index of an application LU.')
appLuName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuName.setStatus('mandatory')
if mibBuilder.loadTexts: appLuName.setDescription('Configured name for this application LU.')
appLuConversionType = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("interactive", 1), ("transparent", 2), ("native", 3), ("netvOper", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuConversionType.setStatus('mandatory')
if mibBuilder.loadTexts: appLuConversionType.setDescription('Indicates the type of protocol conversion being performed on behalf of the application LU.')
appLuHostInterfaceType = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("interactive", 1), ("outbound", 2), ("appc", 3), ("batch", 4), ("rbfte", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuHostInterfaceType.setStatus('mandatory')
if mibBuilder.loadTexts: appLuHostInterfaceType.setDescription('Identifies the type of application interface used by this application LU.')
appLuApplicationName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 1, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuApplicationName.setStatus('mandatory')
if mibBuilder.loadTexts: appLuApplicationName.setDescription('The name of the host application which uses this application LU.')
appLuGatewayName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 1, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuGatewayName.setStatus('mandatory')
if mibBuilder.loadTexts: appLuGatewayName.setDescription('The name of the application gateway. The gateway is used to group application LUs together for management purposes.')
appLuAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("up", 2), ("down", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: appLuAdminState.setStatus('mandatory')
if mibBuilder.loadTexts: appLuAdminState.setDescription('Used by the Management Station to control the Application LU. Values up (2) and down (3) can be read or written, while value other (1) is read-only and indicates that this variable has not been set since reboot.')
appLuOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("inactive", 3), ("active", 4), ("busy", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuOperState.setStatus('mandatory')
if mibBuilder.loadTexts: appLuOperState.setDescription('The current operational state of this application LU: whether it is inactive, active, or has one or more active sessions.')
appLuActiveTime = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 1, 1, 10), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuActiveTime.setStatus('mandatory')
if mibBuilder.loadTexts: appLuActiveTime.setDescription('The value of sysUpTime when this application LU becomes active.')
appLuLastStateChange = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 1, 1, 11), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuLastStateChange.setStatus('mandatory')
if mibBuilder.loadTexts: appLuLastStateChange.setDescription('The value of sysUpTime when the last state change occurred.')
appLuBindFailureReason = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 1, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuBindFailureReason.setStatus('mandatory')
if mibBuilder.loadTexts: appLuBindFailureReason.setDescription('The sense code when there is a bind failure.')
appLuBatchDeviceTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 2), )
if mibBuilder.loadTexts: appLuBatchDeviceTable.setStatus('mandatory')
if mibBuilder.loadTexts: appLuBatchDeviceTable.setDescription('Table of configuration information for batch devices associated with a batch type application LU.')
appLuBatchDeviceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 2, 1), ).setIndexNames((0, "SNANET-MIB", "appLuBatchDeviceT5nodeIndex"), (0, "SNANET-MIB", "appLuBatchDeviceLuIndex"), (0, "SNANET-MIB", "appLuBatchDeviceName"))
if mibBuilder.loadTexts: appLuBatchDeviceEntry.setStatus('mandatory')
if mibBuilder.loadTexts: appLuBatchDeviceEntry.setDescription('The entry contains configuration variables for batch devices associated with batch type application LUs.')
appLuBatchDeviceT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuBatchDeviceT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: appLuBatchDeviceT5nodeIndex.setDescription('Index in the t5nodeTable to which this batch device is associated.')
appLuBatchDeviceLuIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuBatchDeviceLuIndex.setStatus('mandatory')
if mibBuilder.loadTexts: appLuBatchDeviceLuIndex.setDescription('The index of an application LU to which this batch device is associated.')
appLuBatchDeviceName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuBatchDeviceName.setStatus('mandatory')
if mibBuilder.loadTexts: appLuBatchDeviceName.setDescription('The configured name for this batch device.')
appLuBatchDeviceType = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("console", 1), ("cardreader", 2), ("cardpunch", 3), ("printer", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuBatchDeviceType.setStatus('mandatory')
if mibBuilder.loadTexts: appLuBatchDeviceType.setDescription('Indicates the type of batch device.')
appLuBatchDeviceNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 4, 3, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 14))).setMaxAccess("readonly")
if mibBuilder.loadTexts: appLuBatchDeviceNumber.setStatus('mandatory')
if mibBuilder.loadTexts: appLuBatchDeviceNumber.setDescription('A device number used to identify multiple devices of the same type.')
snaSessionTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1), )
if mibBuilder.loadTexts: snaSessionTable.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessionTable.setDescription('Table containing dynamic statistics information relating to SNA sessions.')
snaSessionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1), ).setIndexNames((0, "SNANET-MIB", "snaSessT5nodeIndex"), (0, "SNANET-MIB", "snaSessNauName"), (0, "SNANET-MIB", "snaSessNauSessNumber"))
if mibBuilder.loadTexts: snaSessionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessionEntry.setDescription('Contains information about an SNA session. Objects in this table have read-only access.')
snaSessT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessT5nodeIndex.setDescription('Identifies the Type 5 node which is associated with this SNA session.')
snaSessNauName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessNauName.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessNauName.setDescription('The name of the SNA network addressable unit (NAU).')
snaSessNauSessNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessNauSessNumber.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessNauSessNumber.setDescription('A number identifying the session within the NAU.')
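# Illustrative helper, not part of the generated MIB: snaSessionTable rows
# are indexed by an integer, a variable-length DisplayString, and a second
# integer, so an instance OID embeds the NAU name as its length followed by
# one sub-identifier per octet (the standard non-IMPLIED string-index
# encoding). A minimal sketch building that instance suffix:
def sna_session_instance(t5node_index, nau_name, session_number):
    """Build the instance-OID suffix identifying one snaSessionTable row."""
    name_octets = nau_name.encode('ascii')
    return ((t5node_index, len(name_octets)) + tuple(name_octets)
            + (session_number,))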
snaSessType = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("sscp-sscp", 1), ("lu-lu", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessType.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessType.setDescription('Identifies the type of SNA session.')
snaSessNauElementAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessNauElementAddress.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessNauElementAddress.setDescription('The element address assigned to the SNA NAU by the Type 5 node.')
snaSessState = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("initiating", 1), ("terminating", 2), ("queued", 3), ("active", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessState.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessState.setDescription('The state of the session. Valid values for all session types are: initiating (1), terminating (2), and active (4). The value queued (3) is valid only for snaSessType value of lu-lu (2). Some variables are unknown when the session state is initiating (1) or queued (3).')
snaSessActiveTime = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 7), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessActiveTime.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessActiveTime.setDescription('The value of sysUpTime when the session becomes active.')
snaSessSentBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessSentBytes.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessSentBytes.setDescription('The number of bytes sent on this SNA session.')
snaSessReceivedBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessReceivedBytes.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessReceivedBytes.setDescription('The number of bytes received on this session.')
snaSessSentRus = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessSentRus.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessSentRus.setDescription('The number of RUs sent on this session.')
snaSessReceivedRus = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessReceivedRus.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessReceivedRus.setDescription('The number of RUs received on this session.')
snaSessSentNegativeResps = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessSentNegativeResps.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessSentNegativeResps.setDescription('The number of negative responses sent on this session.')
snaSessReceivedNegativeResps = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessReceivedNegativeResps.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessReceivedNegativeResps.setDescription('The number of negative responses received on this session.')
snaSessPartnerNauName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 14), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessPartnerNauName.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessPartnerNauName.setDescription('The name of the session partner NAU. May be a zero length string if not known.')
snaSessPartnerNauSubarea = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessPartnerNauSubarea.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessPartnerNauSubarea.setDescription('The subarea number of the session partner NAU. Will be zero if not known.')
snaSessPartnerNauElementAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessPartnerNauElementAddress.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessPartnerNauElementAddress.setDescription('The element address of the session partner NAU. Will be zero if not known.')
snaSessVirtualRouteNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessVirtualRouteNumber.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessVirtualRouteNumber.setDescription('A value indicating the virtual route number for this session. Will be zero if not known.')
snaSessTransmissionPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessTransmissionPriority.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessTransmissionPriority.setDescription('A transmission priority for this session. Will be zero if not known.')
snaSessProcCorrelationId = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 19), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessProcCorrelationId.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessProcCorrelationId.setDescription('This variable uniquely identifies this lu-lu session. Will contain a zero length string if snaSessType value is not lu-lu (2).')
snaSessPluIndicator = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("plu", 2), ("slu", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessPluIndicator.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessPluIndicator.setDescription('Indicates whether the LU is primary or secondary for this session. Will be other (1) if snaSessType value is not lu-lu (2).')
snaSessModeName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 5, 1, 1, 21), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaSessModeName.setStatus('mandatory')
if mibBuilder.loadTexts: snaSessModeName.setDescription('The name of the mode used for this session. Contains a zero length string if snaSessType value is not lu-lu (2).')
snaLink802Dot2Table = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 1), )
if mibBuilder.loadTexts: snaLink802Dot2Table.setStatus('mandatory')
if mibBuilder.loadTexts: snaLink802Dot2Table.setDescription('This table contains Managed Objects which describe basic configuration parameters for PUs or TGs using 802.2 links.')
snaLink802Dot2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 1, 1), ).setIndexNames((0, "SNANET-MIB", "snaLink802Dot2T5nodeIndex"), (0, "SNANET-MIB", "snaLink802Dot2Index"))
if mibBuilder.loadTexts: snaLink802Dot2Entry.setStatus('mandatory')
if mibBuilder.loadTexts: snaLink802Dot2Entry.setDescription('Entry contains all link configuration parameters for one PU or TG. The objects in the entry have read-only access.')
snaLink802Dot2T5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLink802Dot2T5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLink802Dot2T5nodeIndex.setDescription('The index of the Type 5 node associated with this link.')
snaLink802Dot2Index = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLink802Dot2Index.setStatus('mandatory')
if mibBuilder.loadTexts: snaLink802Dot2Index.setDescription('The index variable assigned by Agent.')
snaLink802Dot2SourceAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLink802Dot2SourceAddress.setStatus('mandatory')
if mibBuilder.loadTexts: snaLink802Dot2SourceAddress.setDescription('Medium Access Control (MAC) address of the source node of this logical link.')
snaLink802Dot2SourceSAP = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLink802Dot2SourceSAP.setStatus('mandatory')
if mibBuilder.loadTexts: snaLink802Dot2SourceSAP.setDescription("The source node's Service Access Point (SSAP).")
snaLink802Dot2DestinationAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 1, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLink802Dot2DestinationAddress.setStatus('mandatory')
if mibBuilder.loadTexts: snaLink802Dot2DestinationAddress.setDescription('Medium Access Control (MAC) address of the destination node of this logical link.')
snaLink802Dot2DestinationSAP = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLink802Dot2DestinationSAP.setStatus('mandatory')
if mibBuilder.loadTexts: snaLink802Dot2DestinationSAP.setDescription('The Service Access Point used by the remote node (DSAP).')
snaLink802Dot2MediaType = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 1, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLink802Dot2MediaType.setStatus('mandatory')
if mibBuilder.loadTexts: snaLink802Dot2MediaType.setDescription('The underlying physical media type: token-ring or ethernet.')
snaLink802Dot2Role = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("secondary", 1), ("primary", 2), ("negotiable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLink802Dot2Role.setStatus('mandatory')
if mibBuilder.loadTexts: snaLink802Dot2Role.setDescription('The role of the PU used for the 802.2 link.')
snaLink802Dot2LineName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 1, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLink802Dot2LineName.setStatus('mandatory')
if mibBuilder.loadTexts: snaLink802Dot2LineName.setDescription('The configured name of the associated line.')
snaLink802Dot2Port = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 1, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLink802Dot2Port.setStatus('mandatory')
if mibBuilder.loadTexts: snaLink802Dot2Port.setDescription('The physical port location in the system. The first octet contains the port processor id (0 to 255) and the second octet contains the line number (0-8) for line modules which support multiple lines. ')
snaLink802Dot2IfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 1, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLink802Dot2IfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLink802Dot2IfIndex.setDescription('The IfIndex value of the interface used by this link.')
snaLinkSdlcTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 2), )
if mibBuilder.loadTexts: snaLinkSdlcTable.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkSdlcTable.setDescription('This table contains Managed Objects which describe basic configuration parameters for PUs using SDLC link.')
snaLinkSdlcEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 2, 1), ).setIndexNames((0, "SNANET-MIB", "snaLinkSdlcT5nodeIndex"), (0, "SNANET-MIB", "snaLinkSdlcIndex"))
if mibBuilder.loadTexts: snaLinkSdlcEntry.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkSdlcEntry.setDescription('Entry contains all link configuration parameters for one PU. The objects in the entry have read-only access.')
snaLinkSdlcT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkSdlcT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkSdlcT5nodeIndex.setDescription('The index of the Type 5 node associated with this link.')
snaLinkSdlcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkSdlcIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkSdlcIndex.setDescription('The index variable assigned by Agent.')
snaLinkSdlcDestinationStationAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 2, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkSdlcDestinationStationAddr.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkSdlcDestinationStationAddr.setDescription('Station Address (1 byte) of the destination node.')
snaLinkSdlcStationRole = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("secondary", 1), ("primary", 2), ("negotiable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkSdlcStationRole.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkSdlcStationRole.setDescription('The role of the local station in relation to a remote one.')
snaLinkSdlcLineName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 2, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkSdlcLineName.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkSdlcLineName.setDescription('The configured name of the associated line.')
snaLinkSdlcPort = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 2, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkSdlcPort.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkSdlcPort.setDescription('The physical port location in the system. The first octet contains the port processor id (0 to 255) and the second octet contains the line number (0-8) for line modules which support multiple lines. ')
snaLinkSdlcIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 2, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkSdlcIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkSdlcIfIndex.setDescription('The IfIndex value of the interface used by this link.')
snaLinkQllcTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 3), )
if mibBuilder.loadTexts: snaLinkQllcTable.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkQllcTable.setDescription('This table contains Managed Objects which describe basic configuration parameters for PUs using QLLC links.')
snaLinkQllcEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 3, 1), ).setIndexNames((0, "SNANET-MIB", "snaLinkQllcT5nodeIndex"), (0, "SNANET-MIB", "snaLinkQllcIndex"))
if mibBuilder.loadTexts: snaLinkQllcEntry.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkQllcEntry.setDescription('Entry contains all link configuration parameters for one PU. The objects in the entry have read-only access.')
snaLinkQllcT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkQllcT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkQllcT5nodeIndex.setDescription('The index of the Type 5 node associated with this link.')
snaLinkQllcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkQllcIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkQllcIndex.setDescription('The index variable assigned by Agent.')
snaLinkQllcLcn = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkQllcLcn.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkQllcLcn.setDescription('The Logical Channel Number (LCN) used by the source node in the case of SVC. Identifies the Circuit number in the case of PVC.')
snaLinkQllcSourceDteAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 3, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkQllcSourceDteAddr.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkQllcSourceDteAddr.setDescription('The DTE Address (15 bytes) of the source node.')
snaLinkQllcDestinationDteAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 3, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkQllcDestinationDteAddr.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkQllcDestinationDteAddr.setDescription('The DTE Address (15 bytes) of the destination node.')
snaLinkQllcRole = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("secondary", 1), ("primary", 2), ("negotiable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkQllcRole.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkQllcRole.setDescription('The role of the QLLC PU in relation to a remote one.')
snaLinkQllcPdnGroupName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 3, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkQllcPdnGroupName.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkQllcPdnGroupName.setDescription('Name of the associated PDNGROUP.')
snaLinkQllcLineName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 3, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkQllcLineName.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkQllcLineName.setDescription('The configured name of the associated line.')
snaLinkQllcPort = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 3, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkQllcPort.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkQllcPort.setDescription('The physical port location in the system. The first octet contains the port processor id (0 to 255) and the second octet contains the line number (0-8) for line modules which support multiple lines. ')
snaLinkQllcIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 3, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkQllcIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkQllcIfIndex.setDescription('The IfIndex value of the interface used by this link.')
snaLinkChannelTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 4), )
if mibBuilder.loadTexts: snaLinkChannelTable.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkChannelTable.setDescription('This table contains Managed Objects which describe basic configuration parameters for PUs using Channel link.')
snaLinkChannelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 4, 1), ).setIndexNames((0, "SNANET-MIB", "snaLinkChannelT5nodeIndex"), (0, "SNANET-MIB", "snaLinkChannelIndex"))
if mibBuilder.loadTexts: snaLinkChannelEntry.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkChannelEntry.setDescription('Entry contains all link configuration parameters for one PU. The objects in the entry have read-only access.')
snaLinkChannelT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkChannelT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkChannelT5nodeIndex.setDescription('The index of the Type 5 node associated with this link.')
snaLinkChannelIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 4, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkChannelIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkChannelIndex.setDescription('The index variable assigned by Agent.')
snaLinkChannelLineName = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 4, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkChannelLineName.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkChannelLineName.setDescription('The configured name of the associated line.')
snaLinkChannelPort = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 4, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkChannelPort.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkChannelPort.setDescription('The physical port location in the system. The first octet contains the port processor id (0 to 255) and the second octet contains the line number (0-8) for line modules which support multiple lines. ')
snaLinkChannelIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 4, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkChannelIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkChannelIfIndex.setDescription('The IfIndex value of the interface used by this link.')
snaLinkIntTable = MibTable((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 5), )
if mibBuilder.loadTexts: snaLinkIntTable.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkIntTable.setDescription('This table contains Managed Objects which describe basic configuration parameters for Internal links.')
snaLinkIntEntry = MibTableRow((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 5, 1), ).setIndexNames((0, "SNANET-MIB", "snaLinkIntT5nodeIndex"), (0, "SNANET-MIB", "snaLinkIntIndex"))
if mibBuilder.loadTexts: snaLinkIntEntry.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkIntEntry.setDescription('Entry contains all link configuration parameters for one PU. The objects in the entry have read-only access.')
snaLinkIntT5nodeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkIntT5nodeIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkIntT5nodeIndex.setDescription('The index of the Type 5 node associated with this link.')
snaLinkIntIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 5, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkIntIndex.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkIntIndex.setDescription('The index variable assigned by Agent.')
snaLinkIntServiceType = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("tglink", 1), ("uniscope", 2), ("ds3270", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkIntServiceType.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkIntServiceType.setDescription('The type of the internal link.')
snaLinkIntOutputCredit = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 5, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkIntOutputCredit.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkIntOutputCredit.setDescription('This value is used to control the flow of data on the internal link.')
snaLinkIntOutputPacing = MibTableColumn((1, 3, 6, 1, 4, 1, 223, 8, 3, 6, 5, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snaLinkIntOutputPacing.setStatus('mandatory')
if mibBuilder.loadTexts: snaLinkIntOutputPacing.setDescription('This value is used to control the flow of data on the internal link.')
mibBuilder.exportSymbols("SNANET-MIB", snaLinkSdlcStationRole=snaLinkSdlcStationRole, t5CdrmNetworkName=t5CdrmNetworkName, t2nodeLinkType=t2nodeLinkType, appLuApplicationName=appLuApplicationName, snaLinkIntOutputPacing=snaLinkIntOutputPacing, snaSessProcCorrelationId=snaSessProcCorrelationId, t5CdrscDlmName=t5CdrscDlmName, t5DlmTsprof=t5DlmTsprof, t5DlmFmprof=t5DlmFmprof, snaLink=snaLink, saVrTable=saVrTable, t2nodeStatsActLus=t2nodeStatsActLus, t5nodeNetworkName=t5nodeNetworkName, t5CdrmSnaName=t5CdrmSnaName, t2nodeEntry=t2nodeEntry, t2nodeStatsSentPius=t2nodeStatsSentPius, snaLinkIntT5nodeIndex=snaLinkIntT5nodeIndex, saTgLinkIndex=saTgLinkIndex, snaLink802Dot2Entry=snaLink802Dot2Entry, t5CdrmT5nodeIndex=t5CdrmT5nodeIndex, t2nodeMaxPiu=t2nodeMaxPiu, t5CdrscOperState=t5CdrscOperState, appLuT5nodeIndex=appLuT5nodeIndex, snaLuT2nodeIndex=snaLuT2nodeIndex, snaSession=snaSession, snaLink802Dot2DestinationAddress=snaLink802Dot2DestinationAddress, t2nodeStatsInActLus=t2nodeStatsInActLus, snaLuTable=snaLuTable, snaSessionTable=snaSessionTable, prodInfoDesc=prodInfoDesc, t5CdrscSessions=t5CdrscSessions, appLuBatchDeviceLuIndex=appLuBatchDeviceLuIndex, saErTgNumber=saErTgNumber, snaLinkIntEntry=snaLinkIntEntry, snaLinkQllcSourceDteAddr=snaLinkQllcSourceDteAddr, t2nodeStatsBindLus=t2nodeStatsBindLus, snaLinkChannelT5nodeIndex=snaLinkChannelT5nodeIndex, saTgMaxReceivePiuSize=saTgMaxReceivePiuSize, saErNumber=saErNumber, snaLink802Dot2Index=snaLink802Dot2Index, t5nodeIndex=t5nodeIndex, t2nodeStatsEntry=t2nodeStatsEntry, saVrEntry=saVrEntry, snaLinkQllcIfIndex=snaLinkQllcIfIndex, saVrErNumber=saVrErNumber, t2nodeStatsReceivedPius=t2nodeStatsReceivedPius, t5DlmSecprot=t5DlmSecprot, snaSessTransmissionPriority=snaSessTransmissionPriority, t5DlmBindType=t5DlmBindType, saErDestinationSubarea=saErDestinationSubarea, t5CdrscAdminState=t5CdrscAdminState, t5AliasName=t5AliasName, snaLinkSdlcIfIndex=snaLinkSdlcIfIndex, t2nodeLastStateChange=t2nodeLastStateChange, saErTable=saErTable, t2nodeName=t2nodeName, saTgLinkT5nodeIndex=saTgLinkT5nodeIndex, t2nodeT5nodeIndex=t2nodeT5nodeIndex, t2nodeActFailureReason=t2nodeActFailureReason, snaLinkSdlcIndex=snaLinkSdlcIndex, t5CdrmName=t5CdrmName, snaSessReceivedNegativeResps=snaSessReceivedNegativeResps, snaLinkChannelTable=snaLinkChannelTable, snaSessType=snaSessType, snaLuEntry=snaLuEntry, snaLu=snaLu, snaLinkQllcLcn=snaLinkQllcLcn, saTgNumber=saTgNumber, appLuActiveTime=appLuActiveTime, t5nodeSscpName=t5nodeSscpName, dcp=dcp, saTgSentBytes=saTgSentBytes, t5AliasEntry=t5AliasEntry, t2nodeStartTime=t2nodeStartTime, t5DlmPsndpac=t5DlmPsndpac, t2nodeStatsIndex=t2nodeStatsIndex, snaLuOperState=snaLuOperState, t5DlmRusizes=t5DlmRusizes, snaLinkIntServiceType=snaLinkIntServiceType, saTgLinkEntry=saTgLinkEntry, snaSessState=snaSessState, snaLinkQllcEntry=snaLinkQllcEntry, t2nodeStatsTable=t2nodeStatsTable, t5CdrscEntry=t5CdrscEntry, saTgActiveTime=saTgActiveTime, t5nodeSscpId=t5nodeSscpId, snaLinkQllcTable=snaLinkQllcTable, snaLinkQllcDestinationDteAddr=snaLinkQllcDestinationDteAddr, snaLinkQllcLineName=snaLinkQllcLineName, snaSessSentRus=snaSessSentRus, t5nodeTable=t5nodeTable, appLuConversionType=appLuConversionType, t5CosName=t5CosName, t5node=t5node, saTgReceivedBtus=saTgReceivedBtus, saTgLinkReceivedBtus=saTgLinkReceivedBtus, snaLuAdminState=snaLuAdminState, snaSessPartnerNauSubarea=snaSessPartnerNauSubarea, t5CosSnaName=t5CosSnaName, t5DlmPrcvpac=t5DlmPrcvpac, snaSessVirtualRouteNumber=snaSessVirtualRouteNumber, saVrT5nodeIndex=saVrT5nodeIndex, 
saTgSentBtus=saTgSentBtus, snaLuName=snaLuName, saTgLinkAdjacentSubarea=saTgLinkAdjacentSubarea, snaLinkQllcT5nodeIndex=snaLinkQllcT5nodeIndex, snaLinkSdlcDestinationStationAddr=snaLinkSdlcDestinationStationAddr, appLuOperState=appLuOperState, t2node=t2node, appLuHostInterfaceType=appLuHostInterfaceType, saTgLinkReceivedBytes=saTgLinkReceivedBytes, saTransmissionGroup=saTransmissionGroup, appLuEntry=appLuEntry, saTgTable=saTgTable, t2nodeStatsSentNegativeResps=t2nodeStatsSentNegativeResps, t5DlmPservic=t5DlmPservic, saTgOperState=saTgOperState, snaLuIndex=snaLuIndex, snaLink802Dot2Role=snaLink802Dot2Role, applicationLuTable=applicationLuTable, snaLinkChannelPort=snaLinkChannelPort, appLuGatewayName=appLuGatewayName, saTgLinkSentBytes=saTgLinkSentBytes, appLuIndex=appLuIndex, appLuBatchDeviceTable=appLuBatchDeviceTable, snaLinkQllcPort=snaLinkQllcPort, snaLuPoolName=snaLuPoolName, t2nodeTable=t2nodeTable, snaSessionEntry=snaSessionEntry, saVrMinWindowSize=saVrMinWindowSize, snaLuType=snaLuType, snaLinkQllcIndex=snaLinkQllcIndex, t5nodeSubareaNumber=t5nodeSubareaNumber, t5CdrmElementAddress=t5CdrmElementAddress, snaLink802Dot2SourceSAP=snaLink802Dot2SourceSAP, appLuBatchDeviceName=appLuBatchDeviceName, saTgLinkTable=saTgLinkTable, t2nodeType=t2nodeType, t5DlmPriprot=t5DlmPriprot, snaLuUserName=snaLuUserName, prodInfo=prodInfo, snaSessActiveTime=snaSessActiveTime, t5AliasTable=t5AliasTable, saTgLinkAdminState=saTgLinkAdminState, saTgLinkLastStateChange=saTgLinkLastStateChange, saVrNumber=saVrNumber, t2nodeStatsT5nodeIndex=t2nodeStatsT5nodeIndex, saTgReceivedBytes=saTgReceivedBytes, snaLuLastStateChange=snaLuLastStateChange, t5nodePuName=t5nodePuName, snaLinkSdlcTable=snaLinkSdlcTable, saTgLinkTgNumber=saTgLinkTgNumber, appLuBindFailureReason=appLuBindFailureReason, snaLink802Dot2IfIndex=snaLink802Dot2IfIndex, snaSessNauSessNumber=snaSessNauSessNumber, snaLinkChannelIndex=snaLinkChannelIndex, t5CosT5nodeIndex=t5CosT5nodeIndex, snaLink802Dot2T5nodeIndex=snaLink802Dot2T5nodeIndex, snaLinkIntOutputCredit=snaLinkIntOutputCredit, snaLuT5nodeIndex=snaLuT5nodeIndex, t2nodeStatsReceivedBytes=t2nodeStatsReceivedBytes, t5CdrmType=t5CdrmType, saVrMaxWindowSize=saVrMaxWindowSize, t5DlmComprot=t5DlmComprot, saVrPacingCount=saVrPacingCount, t5AliasT5nodeIndex=t5AliasT5nodeIndex, t5CdrscSnaName=t5CdrscSnaName, snaLink802Dot2Table=snaLink802Dot2Table, t5DlmEncr=t5DlmEncr, t5DlmSrcvpac=t5DlmSrcvpac, t5nodeDomainName=t5nodeDomainName, snaLinkIntIndex=snaLinkIntIndex, t2nodeOperState=t2nodeOperState, snaLuBindFailureReason=snaLuBindFailureReason, appLuAdminState=appLuAdminState, saTgLinkType=saTgLinkType, appLuBatchDeviceType=appLuBatchDeviceType, t2nodeStatsSentBytes=t2nodeStatsSentBytes, saErT5nodeIndex=saErT5nodeIndex, snaLink802Dot2LineName=snaLink802Dot2LineName, t5DlmSsndpac=t5DlmSsndpac, snaSessReceivedRus=snaSessReceivedRus, t2nodeBlockNum=t2nodeBlockNum, t2nodeAdminState=t2nodeAdminState, appLuBatchDeviceT5nodeIndex=appLuBatchDeviceT5nodeIndex, t5CosEntry=t5CosEntry, snaLinkQllcRole=snaLinkQllcRole, saTgLastStateChange=saTgLastStateChange, snaSessSentNegativeResps=snaSessSentNegativeResps, snaLinkSdlcEntry=snaLinkSdlcEntry, t5DlmEntry=t5DlmEntry, t5CdrmOperState=t5CdrmOperState, saErOperState=saErOperState, snaLuLocalAddress=snaLuLocalAddress, snaSessModeName=snaSessModeName, snaLinkSdlcT5nodeIndex=snaLinkSdlcT5nodeIndex, t2nodeLinkSpecific=t2nodeLinkSpecific, t5CdrscCosName=t5CdrscCosName, t5CdrmEntry=t5CdrmEntry, saTgAdjacentSubarea=saTgAdjacentSubarea, saTgLinkOperState=saTgLinkOperState, 
t2nodeIdNum=t2nodeIdNum, t5CdrmSubareaNumber=t5CdrmSubareaNumber, snaSessPartnerNauName=snaSessPartnerNauName, snaLinkIntTable=snaLinkIntTable, snanet=snanet, snaNau=snaNau, snaSessPluIndicator=snaSessPluIndicator, snaLinkSdlcLineName=snaLinkSdlcLineName, snaLinkChannelIfIndex=snaLinkChannelIfIndex, t5DlmName=t5DlmName, applicationLu=applicationLu, snaLinkSdlcPort=snaLinkSdlcPort, t5CdrscName=t5CdrscName, appLuName=appLuName, appLuBatchDeviceEntry=appLuBatchDeviceEntry, t5DlmCos=t5DlmCos, saTgEntry=saTgEntry, snaSessReceivedBytes=snaSessReceivedBytes, appLuLastStateChange=appLuLastStateChange, saTgLinkName=saTgLinkName, subarea=subarea, snaLink802Dot2SourceAddress=snaLink802Dot2SourceAddress, saVrWindowSize=saVrWindowSize, snaSessT5nodeIndex=snaSessT5nodeIndex, snaLink802Dot2MediaType=snaLink802Dot2MediaType, t5CosTable=t5CosTable, saTgLinkSentBtus=saTgLinkSentBtus, snaSessNauName=snaSessNauName, t5CdrscTable=t5CdrscTable, t5nodeEntry=t5nodeEntry, t5DlmSnaName=t5DlmSnaName, saTgT5nodeIndex=saTgT5nodeIndex, saTgLinkSpecific=saTgLinkSpecific, t2nodeStatsReceivedNegativeResps=t2nodeStatsReceivedNegativeResps, snaSessPartnerNauElementAddress=snaSessPartnerNauElementAddress, snaLink802Dot2Port=snaLink802Dot2Port, saTgLinkActiveTime=saTgLinkActiveTime, snaLinkQllcPdnGroupName=snaLinkQllcPdnGroupName, saVrTransmissionPriority=saVrTransmissionPriority, t5DlmTable=t5DlmTable, snaLinkChannelEntry=snaLinkChannelEntry, unisys=unisys, snaLuActiveTime=snaLuActiveTime, t5AliasResourceId=t5AliasResourceId, t5CdrscCdrmName=t5CdrscCdrmName, saTgMaxSendPiuSize=saTgMaxSendPiuSize, t2nodeIndex=t2nodeIndex, appLuBatchDeviceNumber=appLuBatchDeviceNumber, snaLinkChannelLineName=snaLinkChannelLineName, t5CdrmTable=t5CdrmTable, t5CdrscT5nodeIndex=t5CdrscT5nodeIndex, t5DlmT5nodeIndex=t5DlmT5nodeIndex, t5nodeOperState=t5nodeOperState)
mibBuilder.exportSymbols("SNANET-MIB", prodInfoFeatures=prodInfoFeatures, saErEntry=saErEntry, t2nodeStatsActiveLus=t2nodeStatsActiveLus, snaSessNauElementAddress=snaSessNauElementAddress, snaSessSentBytes=snaSessSentBytes, t5CdrmAdminState=t5CdrmAdminState, t5CosVrids=t5CosVrids, snaLink802Dot2DestinationSAP=snaLink802Dot2DestinationSAP)
| 135.529639
| 8,910
| 0.775775
|
794e84947417442851a24dc0b8d47fc1781d42d2
| 1,915
|
py
|
Python
|
mayan/apps/document_parsing/tests/mixins.py
|
bonitobonita24/mayan-edms-v3.4.7
|
46604926e09b96716790c9aed462fe231968fd18
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/document_parsing/tests/mixins.py
|
bonitobonita24/mayan-edms-v3.4.7
|
46604926e09b96716790c9aed462fe231968fd18
|
[
"Apache-2.0"
] | 10
|
2021-03-19T23:48:12.000Z
|
2022-03-12T00:41:49.000Z
|
mayan/apps/document_parsing/tests/mixins.py
|
hidayath-ispace/mayan-edms
|
83e5d7e50fdace5cc1681b8b882193fef3053c70
|
[
"Apache-2.0"
] | null | null | null |
class DocumentContentToolsViewsTestMixin(object):
def _request_document_parsing_error_list_view(self):
return self.get(viewname='document_parsing:error_list')
def _request_document_parsing_tool_view(self):
return self.post(
viewname='document_parsing:document_type_submit', data={
'document_type': self.test_document_type.pk
}
)
class DocumentContentViewTestMixin(object):
def _request_test_document_content_delete_view(self):
return self.post(
viewname='document_parsing:document_content_delete', kwargs={
'document_id': self.test_document.pk
}
)
def _request_test_document_content_download_view(self):
return self.get(
viewname='document_parsing:document_content_download', kwargs={
'document_id': self.test_document.pk
}
)
def _request_test_document_content_view(self):
return self.get(
            viewname='document_parsing:document_content', kwargs={
'document_id': self.test_document.pk
}
)
def _request_test_document_page_content_view(self):
return self.get(
viewname='document_parsing:document_page_content', kwargs={
'document_page_id': self.test_document.pages.first().pk,
}
)
def _request_test_document_parsing_error_list_view(self):
return self.get(
viewname='document_parsing:document_parsing_error_list', kwargs={
'document_id': self.test_document.pk,
}
)
class DocumentTypeContentViewsTestMixin(object):
def _request_test_document_type_parsing_settings(self):
return self.get(
viewname='document_parsing:document_type_parsing_settings',
kwargs={'document_type_id': self.test_document_type.pk}
)
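# --- Illustrative sketch (not part of this module) ---
# How these mixins are typically combined in a test case. The base class
# GenericDocumentViewTestCase and the attributes it provides (the test
# client and self.test_document) are assumptions about the surrounding
# test suite, not definitions from this file.
from mayan.apps.documents.tests.base import GenericDocumentViewTestCase

class DocumentContentViewTestCase(
    DocumentContentViewTestMixin, GenericDocumentViewTestCase
):
    def test_document_content_view(self):
        # The mixin supplies the request helper; the assumed base class
        # supplies the authenticated client and the test document.
        response = self._request_test_document_content_view()
        self.assertEqual(response.status_code, 200)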
| 34.196429
| 77
| 0.655352
|
794e850cd225621cb748b68c3d1a46ddc4e23efb
| 5,567
|
py
|
Python
|
plugins/hg4idea/testData/bin/mercurial/hgweb/common.py
|
dmarcotte/intellij-community
|
74ed654c3f9ed99f9cc84fa227846b2c38d683c0
|
[
"Apache-2.0"
] | null | null | null |
plugins/hg4idea/testData/bin/mercurial/hgweb/common.py
|
dmarcotte/intellij-community
|
74ed654c3f9ed99f9cc84fa227846b2c38d683c0
|
[
"Apache-2.0"
] | null | null | null |
plugins/hg4idea/testData/bin/mercurial/hgweb/common.py
|
dmarcotte/intellij-community
|
74ed654c3f9ed99f9cc84fa227846b2c38d683c0
|
[
"Apache-2.0"
] | 1
|
2019-03-14T10:35:19.000Z
|
2019-03-14T10:35:19.000Z
|
# hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import errno, mimetypes, os
HTTP_OK = 200
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_FORBIDDEN = 403
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_SERVER_ERROR = 500
# Hooks for hgweb permission checks; extensions can add hooks here. Each hook
# is invoked like this: hook(hgweb, request, operation), where operation is
# either read, pull or push. Hooks should either raise an ErrorResponse
# exception, or just return.
# It is possible to do both authentication and authorization through this.
permhooks = []
def checkauthz(hgweb, req, op):
'''Check permission for operation based on request data (including
authentication info). Return if op allowed, else raise an ErrorResponse
exception.'''
user = req.env.get('REMOTE_USER')
deny_read = hgweb.configlist('web', 'deny_read')
if deny_read and (not user or deny_read == ['*'] or user in deny_read):
raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
allow_read = hgweb.configlist('web', 'allow_read')
result = (not allow_read) or (allow_read == ['*'])
if not (result or user in allow_read):
raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
if op == 'pull' and not hgweb.allowpull:
raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized')
elif op == 'pull' or op is None: # op is None for interface requests
return
# enforce that you can only push using POST requests
if req.env['REQUEST_METHOD'] != 'POST':
msg = 'push requires POST request'
raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
# require ssl by default for pushing, auth info cannot be sniffed
# and replayed
scheme = req.env.get('wsgi.url_scheme')
if hgweb.configbool('web', 'push_ssl', True) and scheme != 'https':
raise ErrorResponse(HTTP_OK, 'ssl required')
deny = hgweb.configlist('web', 'deny_push')
if deny and (not user or deny == ['*'] or user in deny):
raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
allow = hgweb.configlist('web', 'allow_push')
result = allow and (allow == ['*'] or user in allow)
if not result:
raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
# Add the default permhook, which provides simple authorization.
permhooks.append(checkauthz)
class ErrorResponse(Exception):
    def __init__(self, code, message=None, headers=None):
        Exception.__init__(self)
        self.code = code
        # avoid a shared mutable default argument
        self.headers = headers if headers is not None else []
if message is not None:
self.message = message
else:
self.message = _statusmessage(code)
def _statusmessage(code):
from BaseHTTPServer import BaseHTTPRequestHandler
responses = BaseHTTPRequestHandler.responses
return responses.get(code, ('Error', 'Unknown error'))[0]
def statusmessage(code, message=None):
return '%d %s' % (code, message or _statusmessage(code))
def get_mtime(spath):
cl_path = os.path.join(spath, "00changelog.i")
if os.path.exists(cl_path):
return os.stat(cl_path).st_mtime
else:
return os.stat(spath).st_mtime
def staticfile(directory, fname, req):
"""return a file inside directory with guessed Content-Type header
fname always uses '/' as directory separator and isn't allowed to
contain unusual path components.
Content-Type is guessed using the mimetypes module.
Return an empty string if fname is illegal or file not found.
"""
parts = fname.split('/')
for part in parts:
if (part in ('', os.curdir, os.pardir) or
os.sep in part or os.altsep is not None and os.altsep in part):
return ""
fpath = os.path.join(*parts)
if isinstance(directory, str):
directory = [directory]
for d in directory:
path = os.path.join(d, fpath)
if os.path.exists(path):
break
try:
os.stat(path)
ct = mimetypes.guess_type(path)[0] or "text/plain"
req.respond(HTTP_OK, ct, length = os.path.getsize(path))
return open(path, 'rb').read()
except TypeError:
raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename')
except OSError, err:
if err.errno == errno.ENOENT:
raise ErrorResponse(HTTP_NOT_FOUND)
else:
raise ErrorResponse(HTTP_SERVER_ERROR, err.strerror)
def paritygen(stripecount, offset=0):
"""count parity of horizontal stripes for easier reading"""
if stripecount and offset:
# account for offset, e.g. due to building the list in reverse
count = (stripecount + offset) % stripecount
parity = (stripecount + offset) / stripecount & 1
else:
count = 0
parity = 0
while True:
yield parity
count += 1
if stripecount and count >= stripecount:
parity = 1 - parity
count = 0
def get_contact(config):
"""Return repo contact information or empty string.
web.contact is the primary source, but if that is not set, try
ui.username or $EMAIL as a fallback to display something useful.
"""
return (config("web", "contact") or
config("ui", "username") or
os.environ.get("EMAIL") or "")
| 35.916129
| 77
| 0.670559
|
794e86676817768b857b2153620496593827dd7a
| 108
|
py
|
Python
|
exceptions/__init__.py
|
jeremy-badcock/experiment-length-cli
|
96ce6f1c89dd83735d88a7806b80709dc8bb87fd
|
[
"MIT"
] | null | null | null |
exceptions/__init__.py
|
jeremy-badcock/experiment-length-cli
|
96ce6f1c89dd83735d88a7806b80709dc8bb87fd
|
[
"MIT"
] | null | null | null |
exceptions/__init__.py
|
jeremy-badcock/experiment-length-cli
|
96ce6f1c89dd83735d88a7806b80709dc8bb87fd
|
[
"MIT"
] | null | null | null |
from .cli import InvalidArgumentError, InvalidDateFormatError
from .experiment import InvalidDateRangeError
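# --- Illustrative sketch (standalone script, not part of this package) ---
# How a CLI entry point might consume these exceptions. The parse_args
# helper and its behaviour are hypothetical; only the exception names
# re-exported above come from this package.
from exceptions import InvalidArgumentError, InvalidDateFormatError

def main(argv):
    try:
        start, end = parse_args(argv)  # assumed to raise the errors above
    except (InvalidArgumentError, InvalidDateFormatError) as error:
        print('error: {}'.format(error))
        return 1
    return 0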
| 36
| 61
| 0.888889
|
794e86d4649ab161d2e025179ecd9984dd38d1e2
| 16,784
|
py
|
Python
|
yellowbrick/classifier/rocauc.py
|
Juan0001/yellowbrick
|
b2336e2b3e549bc3d9647c14893add7dd6bc8a2c
|
[
"Apache-2.0"
] | null | null | null |
yellowbrick/classifier/rocauc.py
|
Juan0001/yellowbrick
|
b2336e2b3e549bc3d9647c14893add7dd6bc8a2c
|
[
"Apache-2.0"
] | null | null | null |
yellowbrick/classifier/rocauc.py
|
Juan0001/yellowbrick
|
b2336e2b3e549bc3d9647c14893add7dd6bc8a2c
|
[
"Apache-2.0"
] | null | null | null |
# yellowbrick.classifier.rocauc
# Implements visual ROC/AUC curves for classification evaluation.
#
# Author: Rebecca Bilbro <rbilbro@districtdatalabs.com>
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Author: Neal Humphrey
# Created: Wed May 18 12:39:40 2016 -0400
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: rocauc.py [5388065] neal@nhumphrey.com $
"""
Implements visual ROC/AUC curves for classification evaluation.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from ..exceptions import ModelError
from ..style.palettes import LINE_COLOR
from .base import ClassificationScoreVisualizer
from numpy import interp  # scipy.interp was a deprecated alias of numpy.interp
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc, roc_curve
# Dictionary keys for ROCAUC
MACRO = "macro"
MICRO = "micro"
##########################################################################
## ROCAUC Visualizer
##########################################################################
class ROCAUC(ClassificationScoreVisualizer):
"""
Receiver Operating Characteristic (ROC) curves are a measure of a
classifier's predictive quality that compares and visualizes the tradeoff
between the models' sensitivity and specificity. The ROC curve displays
the true positive rate on the Y axis and the false positive rate on the
X axis on both a global average and per-class basis. The ideal point is
therefore the top-left corner of the plot: false positives are zero and
true positives are one.
This leads to another metric, area under the curve (AUC), a computation
of the relationship between false positives and true positives. The higher
the AUC, the better the model generally is. However, it is also important
to inspect the "steepness" of the curve, as this describes the
maximization of the true positive rate while minimizing the false positive
rate. Generalizing "steepness" usually leads to discussions about
convexity, which we do not get into here.
Parameters
----------
model : estimator
Must be a classifier, otherwise raises YellowbrickTypeError
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
classes : list
A list of class names for the legend. If classes is None and a y value
is passed to fit then the classes are selected from the target vector.
Note that the curves must be computed based on what is in the target
vector passed to the ``score()`` method. Class names are used for
labeling only and must be in the correct order to prevent confusion.
    micro : bool, default = True
        Plot the micro-averaged ROC curve, computed from the sum of all true
        positives and false positives across all classes.
    macro : bool, default = True
        Plot the macro-averaged ROC curve, which simply takes the average of
        the curves across all classes.
per_class : bool, default = True
Plot the ROC curves for each individual class. Primarily this is set
to false if only the macro or micro average curves are required.
kwargs : keyword arguments passed to the super class.
Currently passing in hard-coded colors for the Receiver Operating
Characteristic curve and the diagonal.
These will be refactored to a default Yellowbrick style.
Notes
-----
ROC curves are typically used in binary classification, and in fact the
Scikit-Learn ``roc_curve`` metric is only able to perform metrics for
binary classifiers. As a result it is necessary to binarize the output or
to use one-vs-rest or one-vs-all strategies of classification. The
visualizer does its best to handle multiple situations, but exceptions can
arise from unexpected models or outputs.
Another important point is the relationship of class labels specified on
initialization to those drawn on the curves. The classes are not used to
constrain ordering or filter curves; the ROC computation happens on the
unique values specified in the target vector to the ``score`` method. To
ensure the best quality visualization, do not use a LabelEncoder for this
and do not pass in class labels.
.. seealso:: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
.. todo:: Allow the class list to filter the curves on the visualization.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from yellowbrick.classifier import ROCAUC
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.model_selection import train_test_split
>>> data = load_breast_cancer()
>>> X = data['data']
>>> y = data['target']
>>> X_train, X_test, y_train, y_test = train_test_split(X, y)
>>> viz = ROCAUC(LogisticRegression())
>>> viz.fit(X_train, y_train)
>>> viz.score(X_test, y_test)
>>> viz.poof()
"""
def __init__(self, model, ax=None, classes=None,
micro=True, macro=True, per_class=True, **kwargs):
super(ROCAUC, self).__init__(model, ax=ax, classes=classes, **kwargs)
# Set the visual parameters for ROCAUC
self.micro = micro
self.macro = macro
self.per_class = per_class
def score(self, X, y=None, **kwargs):
"""
        Generates the predicted target scores using the Scikit-Learn
        estimator, then computes the ROC curves and their AUC values.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
Returns
-------
score : float
The micro-average area under the curve of all classes.
"""
# Compute the predictions for the test data
y_pred = self._get_y_scores(X)
        # Classes may be label encoded so only use what's in y to compute.
        # The self.classes_ attribute will be used as names for labels.
classes = np.unique(y)
n_classes = len(classes)
# Store the false positive rate, true positive rate and curve info.
self.fpr = dict()
self.tpr = dict()
self.roc_auc = dict()
# Compute ROC curve and ROC area for each class
for i, c in enumerate(classes):
self.fpr[i], self.tpr[i], _ = roc_curve(y, y_pred[:,i], pos_label=c)
self.roc_auc[i] = auc(self.fpr[i], self.tpr[i])
# Compute micro average
if self.micro:
self._score_micro_average(y, y_pred, classes, n_classes)
# Compute macro average
if self.macro:
self._score_macro_average(n_classes)
# Draw the Curves
self.draw()
# Return micro average if specified
if self.micro:
return self.roc_auc[MICRO]
# Return macro average if not micro
if self.macro:
return self.roc_auc[MACRO]
# Return the base score if neither macro nor micro
return self.estimator.score(X, y)
def draw(self):
"""
Renders ROC-AUC plot.
Called internally by score, possibly more than once
Returns
-------
ax : the axis with the plotted figure
"""
colors = self.colors[0:len(self.classes_)]
n_classes = len(colors)
# Plot the ROC curves for each class
if self.per_class:
for i, color in zip(range(n_classes), colors):
self.ax.plot(
self.fpr[i], self.tpr[i], color=color,
label='ROC of class {}, AUC = {:0.2f}'.format(
self.classes_[i], self.roc_auc[i],
)
)
# Plot the ROC curve for the micro average
if self.micro:
self.ax.plot(
self.fpr[MICRO], self.tpr[MICRO], linestyle="--",
color= self.colors[len(self.classes_)-1],
label='micro-average ROC curve, AUC = {:0.2f}'.format(
self.roc_auc["micro"],
)
)
# Plot the ROC curve for the macro average
if self.macro:
self.ax.plot(
self.fpr[MACRO], self.tpr[MACRO], linestyle="--",
color= self.colors[len(self.classes_)-1],
label='macro-average ROC curve, AUC = {:0.2f}'.format(
self.roc_auc["macro"],
)
)
# Plot the line of no discrimination to compare the curve to.
self.ax.plot([0,1], [0,1], linestyle=':', c=LINE_COLOR)
return self.ax
def finalize(self, **kwargs):
"""
Finalize executes any subclass-specific axes finalization steps.
The user calls poof and poof calls finalize.
Parameters
----------
kwargs: generic keyword arguments.
"""
# Set the title and add the legend
self.set_title('ROC Curves for {}'.format(self.name))
self.ax.legend(loc='lower right', frameon=True)
# Set the limits for the ROC/AUC (always between 0 and 1)
self.ax.set_xlim([0.0, 1.0])
self.ax.set_ylim([0.0, 1.0])
# Set x and y axis labels
        self.ax.set_ylabel('True Positive Rate')
self.ax.set_xlabel('False Positive Rate')
def _get_y_scores(self, X):
"""
The ``roc_curve`` metric requires target scores that can either be the
probability estimates of the positive class, confidence values or non-
thresholded measure of decisions (as returned by "decision_function").
This method computes the scores by resolving the estimator methods
        that retrieve these values.
.. todo:: implement confidence values metric.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features -- generally the test data
that is associated with y_true values.
"""
# The resolution order of scoring functions
attrs = (
'predict_proba',
'decision_function',
)
# Return the first resolved function
for attr in attrs:
try:
method = getattr(self.estimator, attr, None)
if method:
return method(X)
except AttributeError:
# Some Scikit-Learn estimators have both probability and
# decision functions but override __getattr__ and raise an
# AttributeError on access.
continue
# If we've gotten this far, raise an error
raise ModelError(
"ROCAUC requires estimators with predict_proba or "
"decision_function methods."
)
def _score_micro_average(self, y, y_pred, classes, n_classes):
"""
Compute the micro average scores for the ROCAUC curves.
"""
# Convert y to binarized array for micro and macro scores
y = label_binarize(y, classes=classes)
if n_classes == 2:
y = np.hstack((1-y, y))
# Compute micro-average
self.fpr[MICRO], self.tpr[MICRO], _ = roc_curve(y.ravel(), y_pred.ravel())
self.roc_auc[MICRO] = auc(self.fpr[MICRO], self.tpr[MICRO])
def _score_macro_average(self, n_classes):
"""
Compute the macro average scores for the ROCAUC curves.
"""
# Gather all FPRs
all_fpr = np.unique(np.concatenate([self.fpr[i] for i in range(n_classes)]))
avg_tpr = np.zeros_like(all_fpr)
# Compute the averages per class
for i in range(n_classes):
avg_tpr += interp(all_fpr, self.fpr[i], self.tpr[i])
# Finalize the average
avg_tpr /= n_classes
# Store the macro averages
self.fpr[MACRO] = all_fpr
self.tpr[MACRO] = avg_tpr
self.roc_auc[MACRO] = auc(self.fpr[MACRO], self.tpr[MACRO])
##########################################################################
## Quick method for ROCAUC
##########################################################################
def roc_auc(model, X, y=None, ax=None, **kwargs):
"""ROCAUC Quick method:
Receiver Operating Characteristic (ROC) curves are a measure of a
classifier's predictive quality that compares and visualizes the tradeoff
between the models' sensitivity and specificity. The ROC curve displays
the true positive rate on the Y axis and the false positive rate on the
X axis on both a global average and per-class basis. The ideal point is
therefore the top-left corner of the plot: false positives are zero and
true positives are one.
This leads to another metric, area under the curve (AUC), a computation
of the relationship between false positives and true positives. The higher
the AUC, the better the model generally is. However, it is also important
to inspect the "steepness" of the curve, as this describes the
maximization of the true positive rate while minimizing the false positive
rate. Generalizing "steepness" usually leads to discussions about
convexity, which we do not get into here.
Parameters
----------
    model : the Scikit-Learn estimator
        Must be an instance of a classifier, otherwise ``__init__`` raises a
        YellowbrickTypeError.
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
ax : the axis to plot the figure on.
classes : list
A list of class names for the legend. If classes is None and a y value
is passed to fit then the classes are selected from the target vector.
Note that the curves must be computed based on what is in the target
vector passed to the ``score()`` method. Class names are used for
labeling only and must be in the correct order to prevent confusion.
    micro : bool, default = True
        Plot the micro-averaged ROC curve, computed from the sum of all true
        positives and false positives across all classes.
    macro : bool, default = True
        Plot the macro-averaged ROC curve, which simply takes the average of
        the curves across all classes.
per_class : bool, default = True
Plot the ROC curves for each individual class. Primarily this is set
to false if only the macro or micro average curves are required.
Notes
-----
ROC curves are typically used in binary classification, and in fact the
Scikit-Learn ``roc_curve`` metric is only able to perform metrics for
binary classifiers. As a result it is necessary to binarize the output or
to use one-vs-rest or one-vs-all strategies of classification. The
visualizer does its best to handle multiple situations, but exceptions can
arise from unexpected models or outputs.
Another important point is the relationship of class labels specified on
initialization to those drawn on the curves. The classes are not used to
constrain ordering or filter curves; the ROC computation happens on the
unique values specified in the target vector to the ``score`` method. To
ensure the best quality visualization, do not use a LabelEncoder for this
and do not pass in class labels.
.. seealso:: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
.. todo:: Allow the class list to filter the curves on the visualization.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from yellowbrick.classifier import roc_auc
>>> from sklearn.linear_model import LogisticRegression
>>> data = load_breast_cancer()
>>> roc_auc(LogisticRegression(), data.data, data.target)
Returns
-------
ax : matplotlib axes
Returns the axes that the roc-auc curve was drawn on.
"""
# Instantiate the visualizer
visualizer = ROCAUC(model, ax, **kwargs)
# Create the train and test splits
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X_train, y_train, **kwargs)
visualizer.score(X_test, y_test)
visualizer.finalize()
# Return the axes object on the visualizer
return visualizer.ax
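# --- Illustrative sketch (not part of this module) ---
# A variation on the docstring examples: the quick method with the
# per-class and macro curves suppressed, so only the micro-average ROC
# curve is drawn. Dataset and estimator follow the examples above.
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression

data = load_breast_cancer()
ax = roc_auc(
    LogisticRegression(), data.data, data.target,
    per_class=False, macro=False,
)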
| 38.058957
| 91
| 0.634295
|
794e871cb54dbdd706702778c4281a025954b744
| 218
|
py
|
Python
|
oauth/templatetags/__init__.py
|
sometimeslove/www.superstrong.com
|
8600706e526bf5a979ef095ca25ada09cf44d6ac
|
[
"MIT"
] | null | null | null |
oauth/templatetags/__init__.py
|
sometimeslove/www.superstrong.com
|
8600706e526bf5a979ef095ca25ada09cf44d6ac
|
[
"MIT"
] | 10
|
2020-06-06T01:55:42.000Z
|
2022-03-12T00:30:36.000Z
|
oauth/templatetags/__init__.py
|
sometimeslove/www.superstrong.com
|
8600706e526bf5a979ef095ca25ada09cf44d6ac
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
"""
@version: ??
@author: superstrongz
@license: MIT Licence
@contact: 857508399@qq.com
@site: http://www.superstrongz.com/
@software: PyCharm
@file: __init__.py
@time: ??
"""
| 15.571429
| 35
| 0.683486
|
794e87cee969e30f1b35772f3c67266fe6f3ff97
| 4,730
|
py
|
Python
|
pyocd/coresight/generic_mem_ap.py
|
LONGZR007/pyOCD
|
2c5a20a267c2670db0c233487fefd262f5a7c181
|
[
"Apache-2.0"
] | null | null | null |
pyocd/coresight/generic_mem_ap.py
|
LONGZR007/pyOCD
|
2c5a20a267c2670db0c233487fefd262f5a7c181
|
[
"Apache-2.0"
] | null | null | null |
pyocd/coresight/generic_mem_ap.py
|
LONGZR007/pyOCD
|
2c5a20a267c2670db0c233487fefd262f5a7c181
|
[
"Apache-2.0"
] | null | null | null |
# pyOCD debugger
# Copyright (c) 2020 Cypress Semiconductor Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from .component import CoreSightCoreComponent
from ..core.target import Target
from ..core.core_registers import CoreRegistersIndex
LOG = logging.getLogger(__name__)
DEAD_VALUE = 0
class GenericMemAPTarget(Target, CoreSightCoreComponent):
"""! @brief This target represents ARM debug Access Port without a CPU
It may be used to access the address space of the target via Access Ports
without real ARM CPU core behind it. For instance Cypress PSoC64 devices have
three APs implemented in the hardware:
* AP #0 -> CPU-less AHB AP
* AP #1 -> Cortex-M0+ AP
* AP #2 -> Cortex-M4F AP
Depending on the protection state, AP #1 and AP #2 can be permanently disabled.
This class allows to communicate with Secure FW running on the target via AP #0.
Most of the methods in this class (except memory access methods) are empty/dummy.
"""
def __init__(self, session, ap, memory_map=None, core_num=0, cmpid=None, address=None):
Target.__init__(self, session, memory_map)
CoreSightCoreComponent.__init__(self, ap, cmpid, address)
self.core_number = core_num
self.core_type = DEAD_VALUE
self._core_registers = CoreRegistersIndex()
self._target_context = None
def add_child(self, cmp):
pass
@property
def core_registers(self):
return self._core_registers
@property
def supported_security_states(self):
return Target.SecurityState.NONSECURE,
def init(self):
pass
def disconnect(self, resume=True):
pass
def write_memory(self, addr, value, transfer_size=32):
self.ap.write_memory(addr, value, transfer_size)
    def read_memory(self, addr, transfer_size=32, now=True):
        # Reads are always performed synchronously, regardless of the 'now' flag.
        return self.ap.read_memory(addr, transfer_size, True)
def read_memory_block8(self, addr, size):
return self.ap.read_memory_block8(addr, size)
def write_memory_block8(self, addr, data):
self.ap.write_memory_block8(addr, data)
def write_memory_block32(self, addr, data):
self.ap.write_memory_block32(addr, data)
def read_memory_block32(self, addr, size):
return self.ap.read_memory_block32(addr, size)
def halt(self):
pass
def step(self, disable_interrupts=True, start=0, end=0):
pass
def reset(self, reset_type=None):
pass
def reset_and_halt(self, reset_type=None):
self.reset(reset_type)
def get_state(self):
return Target.State.HALTED
def get_security_state(self):
return Target.SecurityState.NONSECURE
def is_running(self):
return self.get_state() == Target.State.RUNNING
def is_halted(self):
return self.get_state() == Target.State.HALTED
def resume(self):
pass
def find_breakpoint(self, addr):
return None
def read_core_register(self, reg):
return DEAD_VALUE
def read_core_register_raw(self, reg):
return DEAD_VALUE
def read_core_registers_raw(self, reg_list):
return [DEAD_VALUE] * len(reg_list)
def write_core_register(self, reg, data):
pass
def write_core_register_raw(self, reg, data):
pass
def write_core_registers_raw(self, reg_list, data_list):
pass
def set_breakpoint(self, addr, type=Target.BreakpointType.AUTO):
return False
def remove_breakpoint(self, addr):
pass
def get_breakpoint_type(self, addr):
return None
def set_watchpoint(self, addr, size, type):
return False
def remove_watchpoint(self, addr, size, type):
pass
def set_vector_catch(self, enable_mask):
pass
def get_vector_catch(self):
return 0
def get_halt_reason(self):
return Target.HaltReason.DEBUG
def get_target_context(self, core=None):
return self._target_context
def set_target_context(self, context):
self._target_context = context
def create_init_sequence(self):
pass
def mass_erase(self):
pass
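
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): since this target just
# forwards memory transactions to its Access Port, a tiny fake AP is enough to
# show the behaviour described in the class docstring.  FakeAP and the None
# session below are hypothetical stand-ins; in real use pyOCD constructs the
# session and AP objects during connect.
#
#   class FakeAP:
#       def read_memory(self, addr, transfer_size=32, now=True):
#           return 0xDEADBEEF
#       def write_memory(self, addr, value, transfer_size=32):
#           pass
#
#   target = GenericMemAPTarget(session=None, ap=FakeAP())
#   target.write_memory(0x08000000, 0x1234)        # forwarded to the AP
#   assert target.read_memory(0x08000000) == 0xDEADBEEF
#   assert target.is_halted()                      # state is always HALTED
# ---------------------------------------------------------------------------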
| avg_line_length: 27.988166 | max_line_length: 91 | alphanum_fraction: 0.689006 |

hexsha: 794e898e03edef3950620d38cc7fc74d97d83c85 | size: 52 | ext: py | lang: Python
max_stars:  lib/networks/__init__.py | bertid/clean-pvnet | 8e1afdfe450c7d73274581d2907ad0215cba8331 | ["Apache-2.0"] | count: 284 | 2019-12-14T08:09:40.000Z .. 2022-03-26T02:17:26.000Z
max_issues: lib/networks/__init__.py | danikhani/clean-pvnet | 4f91324c5bc9d2a05624f49c6cad15a33a446106 | ["Apache-2.0"] | count: 208 | 2019-12-16T13:09:49.000Z .. 2022-03-25T07:38:20.000Z
max_forks:  lib/networks/__init__.py | danikhani/clean-pvnet | 4f91324c5bc9d2a05624f49c6cad15a33a446106 | ["Apache-2.0"] | count: 88 | 2019-12-14T12:33:51.000Z .. 2022-03-22T21:07:09.000Z
from .make_network import make_network, get_network
| avg_line_length: 26 | max_line_length: 51 | alphanum_fraction: 0.865385 |

hexsha: 794e8a542a4446ecbd75f8604ec31e76aa80142b | size: 3728 | ext: py | lang: Python
max_stars:  src/trainModel.py | XiaominWuFred/autoGaming | 9277f6d3c8508ef7c2fa187801b995469af3f3f5 | ["MIT"] | count: null | null .. null
max_issues: src/trainModel.py | XiaominWuFred/autoGaming | 9277f6d3c8508ef7c2fa187801b995469af3f3f5 | ["MIT"] | count: null | null .. null
max_forks:  src/trainModel.py | XiaominWuFred/autoGaming | 9277f6d3c8508ef7c2fa187801b995469af3f3f5 | ["MIT"] | count: null | null .. null
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
import math
# Use the Keras bundled with TensorFlow so these layers and the tf.keras loss
# below come from the same package.
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Flatten, Activation, Dropout, Conv2D, MaxPooling2D
from PIL import Image
import random
def loadImgs():
    Imgs = []
    # Each class directory holds 1000 screenshots; label them 0..3 in order.
    for label, name in enumerate(['shang', 'xia', 'zuo', 'you']):
        for i in range(1000):
            image = Image.open('../train/' + name + '/' + str(i) + '.png')
            npImg = np.array(image)
            Imgs.append([npImg, label])
            print('loading ' + name + ': ' + str(i))
    # dtype=object keeps the (image, label) pairs intact; recent NumPy
    # versions refuse to build such a ragged array implicitly.
    Imgs = np.array(Imgs, dtype=object)
    print(Imgs.shape)
    random.shuffle(Imgs)
imgs = []
labels = []
for each in Imgs:
imgs.append(each[0])
labels.append(each[1])
imgs = np.array(imgs)
labels = np.array(labels)
print(imgs.shape)
print(labels)
train_images = imgs[0:3600]
train_labels = labels[0:3600]
test_images = imgs[3600:]
test_labels = labels[3600:]
return (train_images, train_labels), (test_images, test_labels)
# The MNIST loader used while prototyping is kept for reference; the gameplay
# screenshots are loaded instead.
#mnist = keras.datasets.mnist
#(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
(train_images, train_labels), (test_images, test_labels) = loadImgs()
# Normalize the input images so that each pixel value is between 0 and 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Show the first 25 images in the training dataset.
#show_sample(train_images,
#['Label: %s' % label for label in train_labels])
conv_net = Sequential()
conv_net.add(Conv2D(32, (3, 3), activation='relu', input_shape=(67, 60, 3)))  # 127, 134
conv_net.add(MaxPooling2D(pool_size=(3, 3)))
conv_net.add(Conv2D(64, (3, 3), activation='relu'))
conv_net.add(Flatten())
# Fully connected classifier head.
conv_net.add(Dense(32, activation='relu', use_bias=True))
conv_net.add(Dense(16, activation='relu', use_bias=True))
conv_net.add(Dense(4, activation='softmax', use_bias=True))
#conv_net.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model = conv_net
model.compile(optimizer='adam',
              # The final layer already applies softmax, so the loss must
              # treat the model outputs as probabilities, not logits.
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])
train_images = np.array(train_images)
train_images = train_images.reshape((train_images.shape[0],train_images.shape[1],train_images.shape[2],3))
model.fit(train_images, train_labels, epochs=50)
test_images = np.array(test_images)
test_images = test_images.reshape((test_images.shape[0],test_images.shape[1],test_images.shape[2],3))
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
# Predict the labels of the images in our test dataset.
predictions = model.predict(test_images)
# Convert Keras model to TF Lite format.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Save the TF Lite model to a file.
with open('../models/shendu2.tflite', "wb") as f:
    f.write(tflite_model)
print('finished')
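
# ---------------------------------------------------------------------------
# Illustrative follow-up (not part of the original file): a minimal sketch of
# loading the exported .tflite model with the TF Lite interpreter.  The random
# frame below is a stand-in for a real 67x60 RGB capture.
#
#   interpreter = tf.lite.Interpreter(model_path='../models/shendu2.tflite')
#   interpreter.allocate_tensors()
#   inp = interpreter.get_input_details()[0]
#   out = interpreter.get_output_details()[0]
#   frame = np.random.rand(1, 67, 60, 3).astype(np.float32)
#   interpreter.set_tensor(inp['index'], frame)
#   interpreter.invoke()
#   print(interpreter.get_tensor(out['index']))    # 4 class probabilities
# ---------------------------------------------------------------------------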
| avg_line_length: 29.354331 | max_line_length: 107 | alphanum_fraction: 0.650751 |

hexsha: 794e8b1c4fb03341588e5a060f664f0a42c55394 | size: 551 | ext: py | lang: Python
max_stars:  rqalpha/mod/rqalpha_mod_sys_simulation/testing.py | jandykwan/rqalpha | 1ed12adb5337ad81cb0c662a45ff4c787b4f3313 | ["Apache-2.0"] | count: 1 | 2019-04-22T14:29:24.000Z .. 2019-04-22T14:29:24.000Z
max_issues: rqalpha/mod/rqalpha_mod_sys_simulation/testing.py | jandykwan/rqalpha | 1ed12adb5337ad81cb0c662a45ff4c787b4f3313 | ["Apache-2.0"] | count: 2 | 2018-07-31T08:42:11.000Z .. 2019-05-07T10:25:52.000Z
max_forks:  rqalpha/mod/rqalpha_mod_sys_simulation/testing.py | jandykwan/rqalpha | 1ed12adb5337ad81cb0c662a45ff4c787b4f3313 | ["Apache-2.0"] | count: 1 | 2021-11-03T15:42:57.000Z .. 2021-11-03T15:42:57.000Z
from rqalpha.utils.testing import EnvironmentFixture
class SimulationEventSourceFixture(EnvironmentFixture):
def __init__(self, *args, **kwargs):
super(SimulationEventSourceFixture, self).__init__(*args, **kwargs)
self.simulation_event_source = None
def init_fixture(self):
from rqalpha.mod.rqalpha_mod_sys_simulation.simulation_event_source import SimulationEventSource
super(SimulationEventSourceFixture, self).init_fixture()
self.simulation_event_source = SimulationEventSource(self.env)
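
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): a hypothetical test
# case mixing the fixture into unittest.TestCase.  Whether init_fixture() is
# called explicitly or by the base class's setUp depends on
# rqalpha.utils.testing, so treat this purely as a shape.
#
#   from unittest import TestCase
#
#   class SimulationEventSourceTestCase(SimulationEventSourceFixture, TestCase):
#       def test_event_source_is_created(self):
#           self.init_fixture()
#           self.assertIsNotNone(self.simulation_event_source)
# ---------------------------------------------------------------------------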
| avg_line_length: 39.357143 | max_line_length: 104 | alphanum_fraction: 0.76588 |

hexsha: 794e8b48334faba064479a0a742ff5dc322b2964 | size: 4046 | ext: py | lang: Python
max_stars:  terra_sdk/protobuf/tendermint/crypto/keys_pb2.py | sejalsahni/terra.py | 0fd84969441c58427a21448520697c3ab3ec2d0c | ["MIT"] | count: 24 | 2021-05-30T05:48:33.000Z .. 2021-10-07T04:47:15.000Z
max_issues: terra_sdk/protobuf/tendermint/crypto/keys_pb2.py | sejalsahni/terra.py | 0fd84969441c58427a21448520697c3ab3ec2d0c | ["MIT"] | count: 18 | 2021-05-30T09:05:26.000Z .. 2021-10-17T07:12:12.000Z
max_forks:  terra_sdk/protobuf/tendermint/crypto/keys_pb2.py | sejalsahni/terra.py | 0fd84969441c58427a21448520697c3ab3ec2d0c | ["MIT"] | count: 10 | 2021-02-11T00:56:04.000Z .. 2021-05-27T08:37:49.000Z
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tendermint/crypto/keys.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="tendermint/crypto/keys.proto",
package="tendermint.crypto",
syntax="proto3",
serialized_options=b"Z8github.com/tendermint/tendermint/proto/tendermint/crypto",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1ctendermint/crypto/keys.proto\x12\x11tendermint.crypto\x1a\x14gogoproto/gogo.proto"D\n\tPublicKey\x12\x11\n\x07\x65\x64\x32\x35\x35\x31\x39\x18\x01 \x01(\x0cH\x00\x12\x13\n\tsecp256k1\x18\x02 \x01(\x0cH\x00:\x08\xe8\xa1\x1f\x01\xe8\xa0\x1f\x01\x42\x05\n\x03sumB:Z8github.com/tendermint/tendermint/proto/tendermint/cryptob\x06proto3',
dependencies=[
gogoproto_dot_gogo__pb2.DESCRIPTOR,
],
)
_PUBLICKEY = _descriptor.Descriptor(
name="PublicKey",
full_name="tendermint.crypto.PublicKey",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="ed25519",
full_name="tendermint.crypto.PublicKey.ed25519",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="secp256k1",
full_name="tendermint.crypto.PublicKey.secp256k1",
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\350\241\037\001\350\240\037\001",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="sum",
full_name="tendermint.crypto.PublicKey.sum",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=73,
serialized_end=141,
)
_PUBLICKEY.oneofs_by_name["sum"].fields.append(_PUBLICKEY.fields_by_name["ed25519"])
_PUBLICKEY.fields_by_name["ed25519"].containing_oneof = _PUBLICKEY.oneofs_by_name["sum"]
_PUBLICKEY.oneofs_by_name["sum"].fields.append(_PUBLICKEY.fields_by_name["secp256k1"])
_PUBLICKEY.fields_by_name["secp256k1"].containing_oneof = _PUBLICKEY.oneofs_by_name[
"sum"
]
DESCRIPTOR.message_types_by_name["PublicKey"] = _PUBLICKEY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PublicKey = _reflection.GeneratedProtocolMessageType(
"PublicKey",
(_message.Message,),
{
"DESCRIPTOR": _PUBLICKEY,
"__module__": "tendermint.crypto.keys_pb2"
# @@protoc_insertion_point(class_scope:tendermint.crypto.PublicKey)
},
)
_sym_db.RegisterMessage(PublicKey)
DESCRIPTOR._options = None
_PUBLICKEY._options = None
# @@protoc_insertion_point(module_scope)
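
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated file): PublicKey is a oneof
# over ed25519/secp256k1, so assigning one field selects that arm.
#
#   key = PublicKey(ed25519=b'\x01' * 32)
#   assert key.WhichOneof('sum') == 'ed25519'
#   data = key.SerializeToString()
#   assert PublicKey.FromString(data) == key
# ---------------------------------------------------------------------------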
| avg_line_length: 33.163934 | max_line_length: 358 | alphanum_fraction: 0.673752 |

hexsha: 794e8b5b383d76ea0028f0618501d048a461317c | size: 2421 | ext: py | lang: Python
max_stars:  doc/source/conf.py | jamielennox/jsonhome | b886b7167a57370707473b0535e03d0356b40b3e | ["Apache-2.0"] | count: 1 | 2016-08-01T13:08:36.000Z .. 2016-08-01T13:08:36.000Z
max_issues: doc/source/conf.py | jamielennox/jsonhome | b886b7167a57370707473b0535e03d0356b40b3e | ["Apache-2.0"] | count: null | null .. null
max_forks:  doc/source/conf.py | jamielennox/jsonhome | b886b7167a57370707473b0535e03d0356b40b3e | ["Apache-2.0"] | count: null | null .. null
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'jsonhome'
copyright = u'2015, Jamie Lennox'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'Jamie Lennox', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
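
# ---------------------------------------------------------------------------
# Illustrative note (not part of the original file): with this conf.py the
# docs are typically built from the repository root with something like
#
#   sphinx-build -b html doc/source doc/build/html
#
# after which intersphinx resolves Python stdlib references via the mapping
# above.
# ---------------------------------------------------------------------------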
| avg_line_length: 32.28 | max_line_length: 79 | alphanum_fraction: 0.695993 |

hexsha: 794e8ba07ed5197e30226e9b074cfd8b8f9f210e | size: 13015 | ext: py | lang: Python
max_stars:  nova/api/openstack/compute/simple_tenant_usage.py | hanlind/nova | 658ade3aca1305e15c3b29dcced5f184159794cd | ["Apache-2.0"] | count: null | null .. null
max_issues: nova/api/openstack/compute/simple_tenant_usage.py | hanlind/nova | 658ade3aca1305e15c3b29dcced5f184159794cd | ["Apache-2.0"] | count: 11 | 2017-06-19T01:28:55.000Z .. 2017-06-23T02:01:47.000Z
max_forks:  nova/api/openstack/compute/simple_tenant_usage.py | hanlind/nova | 658ade3aca1305e15c3b29dcced5f184159794cd | ["Apache-2.0"] | count: 1 | 2020-07-22T22:06:24.000Z .. 2020-07-22T22:06:24.000Z
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
from oslo_utils import timeutils
import six
import six.moves.urllib.parse as urlparse
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.views import usages as usages_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.policies import simple_tenant_usage as stu_policies
CONF = nova.conf.CONF
ALIAS = "os-simple-tenant-usage"
def parse_strtime(dstr, fmt):
try:
return timeutils.parse_strtime(dstr, fmt)
except (TypeError, ValueError) as e:
raise exception.InvalidStrTime(reason=six.text_type(e))
class SimpleTenantUsageController(wsgi.Controller):
_view_builder_class = usages_view.ViewBuilder
def _hours_for(self, instance, period_start, period_stop):
launched_at = instance.launched_at
terminated_at = instance.terminated_at
if terminated_at is not None:
if not isinstance(terminated_at, datetime.datetime):
# NOTE(mriedem): Instance object DateTime fields are
# timezone-aware so convert using isotime.
terminated_at = timeutils.parse_isotime(terminated_at)
if launched_at is not None:
if not isinstance(launched_at, datetime.datetime):
launched_at = timeutils.parse_isotime(launched_at)
        # nothing if it terminated before the usage report started
        if terminated_at and terminated_at < period_start:
            return 0

        # nothing if it started after the usage report ended
        if launched_at and launched_at > period_stop:
            return 0
        if launched_at:
            # if the instance launched after the period started, don't charge
            # for the time before the period
            start = max(launched_at, period_start)
if terminated_at:
# if instance stopped before period_stop, don't charge after
stop = min(period_stop, terminated_at)
else:
# instance is still running, so charge them up to current time
stop = period_stop
dt = stop - start
return dt.total_seconds() / 3600.0
else:
# instance hasn't launched, so no charge
return 0
def _get_flavor(self, context, instance, flavors_cache):
"""Get flavor information from the instance object,
allowing a fallback to lookup by-id for deleted instances only.
"""
try:
return instance.get_flavor()
except exception.NotFound:
if not instance.deleted:
# Only support the fallback mechanism for deleted instances
# that would have been skipped by migration #153
raise
flavor_type = instance.instance_type_id
if flavor_type in flavors_cache:
return flavors_cache[flavor_type]
try:
flavor_ref = objects.Flavor.get_by_id(context, flavor_type)
flavors_cache[flavor_type] = flavor_ref
except exception.FlavorNotFound:
# can't bill if there is no flavor
flavor_ref = None
return flavor_ref
def _tenant_usages_for_period(self, context, period_start, period_stop,
tenant_id=None, detailed=True, limit=None,
marker=None):
instances = objects.InstanceList.get_active_by_window_joined(
context, period_start, period_stop, tenant_id,
expected_attrs=['flavor'], limit=limit, marker=marker)
rval = {}
flavors = {}
all_server_usages = []
for instance in instances:
info = {}
info['hours'] = self._hours_for(instance,
period_start,
period_stop)
flavor = self._get_flavor(context, instance, flavors)
if not flavor:
info['flavor'] = ''
else:
info['flavor'] = flavor.name
info['instance_id'] = instance.uuid
info['name'] = instance.display_name
info['tenant_id'] = instance.project_id
try:
info['memory_mb'] = instance.flavor.memory_mb
info['local_gb'] = (instance.flavor.root_gb +
instance.flavor.ephemeral_gb)
info['vcpus'] = instance.flavor.vcpus
except exception.InstanceNotFound:
                # This is a rare case: the instance disappeared during
                # analysis. As this is just info collection, we can skip it
                # and try the next one.
                continue
# NOTE(mriedem): We need to normalize the start/end times back
# to timezone-naive so the response doesn't change after the
# conversion to objects.
info['started_at'] = timeutils.normalize_time(instance.launched_at)
info['ended_at'] = (
timeutils.normalize_time(instance.terminated_at) if
instance.terminated_at else None)
if info['ended_at']:
info['state'] = 'terminated'
else:
info['state'] = instance.vm_state
now = timeutils.utcnow()
if info['state'] == 'terminated':
delta = info['ended_at'] - info['started_at']
else:
delta = now - info['started_at']
info['uptime'] = int(delta.total_seconds())
if info['tenant_id'] not in rval:
summary = {}
summary['tenant_id'] = info['tenant_id']
if detailed:
summary['server_usages'] = []
summary['total_local_gb_usage'] = 0
summary['total_vcpus_usage'] = 0
summary['total_memory_mb_usage'] = 0
summary['total_hours'] = 0
summary['start'] = timeutils.normalize_time(period_start)
summary['stop'] = timeutils.normalize_time(period_stop)
rval[info['tenant_id']] = summary
summary = rval[info['tenant_id']]
summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
summary['total_memory_mb_usage'] += (info['memory_mb'] *
info['hours'])
summary['total_hours'] += info['hours']
all_server_usages.append(info)
if detailed:
summary['server_usages'].append(info)
return list(rval.values()), all_server_usages
def _parse_datetime(self, dtstr):
if not dtstr:
value = timeutils.utcnow()
elif isinstance(dtstr, datetime.datetime):
value = dtstr
else:
for fmt in ["%Y-%m-%dT%H:%M:%S",
"%Y-%m-%dT%H:%M:%S.%f",
"%Y-%m-%d %H:%M:%S.%f"]:
try:
value = parse_strtime(dtstr, fmt)
break
except exception.InvalidStrTime:
pass
else:
msg = _("Datetime is in invalid format")
raise exception.InvalidStrTime(reason=msg)
# NOTE(mriedem): Instance object DateTime fields are timezone-aware
# so we have to force UTC timezone for comparing this datetime against
# instance object fields and still maintain backwards compatibility
# in the API.
if value.utcoffset() is None:
value = value.replace(tzinfo=iso8601.iso8601.Utc())
return value
def _get_datetime_range(self, req):
qs = req.environ.get('QUERY_STRING', '')
env = urlparse.parse_qs(qs)
# NOTE(lzyeval): env.get() always returns a list
period_start = self._parse_datetime(env.get('start', [None])[0])
period_stop = self._parse_datetime(env.get('end', [None])[0])
if not period_start < period_stop:
msg = _("Invalid start time. The start time cannot occur after "
"the end time.")
raise exc.HTTPBadRequest(explanation=msg)
detailed = env.get('detailed', ['0'])[0] == '1'
return (period_start, period_stop, detailed)
@wsgi.Controller.api_version("2.40")
@extensions.expected_errors(400)
def index(self, req):
"""Retrieve tenant_usage for all tenants."""
return self._index(req, links=True)
@wsgi.Controller.api_version("2.1", "2.39") # noqa
@extensions.expected_errors(400)
def index(self, req):
"""Retrieve tenant_usage for all tenants."""
return self._index(req)
@wsgi.Controller.api_version("2.40")
@extensions.expected_errors(400)
def show(self, req, id):
"""Retrieve tenant_usage for a specified tenant."""
return self._show(req, id, links=True)
@wsgi.Controller.api_version("2.1", "2.39") # noqa
@extensions.expected_errors(400)
def show(self, req, id):
"""Retrieve tenant_usage for a specified tenant."""
return self._show(req, id)
def _index(self, req, links=False):
context = req.environ['nova.context']
context.can(stu_policies.POLICY_ROOT % 'list')
try:
(period_start, period_stop, detailed) = self._get_datetime_range(
req)
except exception.InvalidStrTime as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
now = timeutils.parse_isotime(timeutils.utcnow().isoformat())
if period_stop > now:
period_stop = now
marker = None
limit = CONF.api.max_limit
if links:
limit, marker = common.get_limit_and_marker(req)
try:
usages, server_usages = self._tenant_usages_for_period(
context, period_start, period_stop, detailed=detailed,
limit=limit, marker=marker)
except exception.MarkerNotFound as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
tenant_usages = {'tenant_usages': usages}
if links:
usages_links = self._view_builder.get_links(req, server_usages)
if usages_links:
tenant_usages['tenant_usages_links'] = usages_links
return tenant_usages
def _show(self, req, id, links=False):
tenant_id = id
context = req.environ['nova.context']
context.can(stu_policies.POLICY_ROOT % 'show',
{'project_id': tenant_id})
try:
(period_start, period_stop, ignore) = self._get_datetime_range(
req)
except exception.InvalidStrTime as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
now = timeutils.parse_isotime(timeutils.utcnow().isoformat())
if period_stop > now:
period_stop = now
marker = None
limit = CONF.api.max_limit
if links:
limit, marker = common.get_limit_and_marker(req)
try:
usage, server_usages = self._tenant_usages_for_period(
context, period_start, period_stop, tenant_id=tenant_id,
detailed=True, limit=limit, marker=marker)
except exception.MarkerNotFound as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if len(usage):
usage = list(usage)[0]
else:
usage = {}
tenant_usage = {'tenant_usage': usage}
if links:
usages_links = self._view_builder.get_links(
req, server_usages, tenant_id=tenant_id)
if usages_links:
tenant_usage['tenant_usage_links'] = usages_links
return tenant_usage
class SimpleTenantUsage(extensions.V21APIExtensionBase):
"""Simple tenant usage extension."""
name = "SimpleTenantUsage"
alias = ALIAS
version = 1
def get_resources(self):
resources = []
res = extensions.ResourceExtension(ALIAS,
SimpleTenantUsageController())
resources.append(res)
return resources
def get_controller_extensions(self):
return []
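
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the query-string
# parsing in _get_datetime_range corresponds to requests of the form
#
#   GET .../os-simple-tenant-usage?start=2017-06-01T00:00:00&end=2017-06-30T00:00:00&detailed=1
#   GET .../os-simple-tenant-usage/{tenant_id}?start=...&end=...
#
# An omitted start or end defaults to the current time, detailed=1 adds the
# per-server usage list to each tenant summary, and start must precede end or
# the request fails with HTTP 400.
# ---------------------------------------------------------------------------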
| avg_line_length: 36.558989 | max_line_length: 79 | alphanum_fraction: 0.594622 |

hexsha: 794e8be7b16a15df9dac4aeeb3260af74c7f8925 | size: 411 | ext: py | lang: Python
max_stars:  .history/py/UserInput_20201230130306.py | minefarmer/Comprehensive-Python | f97b9b83ec328fc4e4815607e6a65de90bb8de66 | ["Unlicense"] | count: null | null .. null
max_issues: .history/py/UserInput_20201230130306.py | minefarmer/Comprehensive-Python | f97b9b83ec328fc4e4815607e6a65de90bb8de66 | ["Unlicense"] | count: null | null .. null
max_forks:  .history/py/UserInput_20201230130306.py | minefarmer/Comprehensive-Python | f97b9b83ec328fc4e4815607e6a65de90bb8de66 | ["Unlicense"] | count: null | null .. null
# person = input("Enter your name: ") # Carl
# print("Hello ", person) # Hello Carl
# x = input("Enter a number: ") # 8
# y = input("Enter another number: ") # 7
# z = x + y
# print(z) # 87 ## this was the result of concatenation
person = input("Enter your name: ") #
print("Hello ", person)
x = input("Enter a number: ") # 8
y = input("Enter another number: ") # 7
z = print(int(x) +int(y))
| avg_line_length: 21.631579 | max_line_length: 58 | alphanum_fraction: 0.576642 |

hexsha: 794e8cbdb82c51c415bad4fcef252a278b27865c | size: 40415 | ext: py | lang: Python
max_stars:  sx/pisa3/pisa_context.py | kravciuk/pisa | 39a7e9379cf059f11fea54127c8bfb242ad477e6 | ["Apache-2.0"] | count: null | null .. null
max_issues: sx/pisa3/pisa_context.py | kravciuk/pisa | 39a7e9379cf059f11fea54127c8bfb242ad477e6 | ["Apache-2.0"] | count: null | null .. null
max_forks:  sx/pisa3/pisa_context.py | kravciuk/pisa | 39a7e9379cf059f11fea54127c8bfb242ad477e6 | ["Apache-2.0"] | count: 1 | 2021-06-10T20:46:31.000Z .. 2021-06-10T20:46:31.000Z
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__reversion__ = "$Revision: 20 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2007-10-09 12:58:24 +0200 (Di, 09 Okt 2007) $"
from pisa_util import *
from pisa_reportlab import *
import pisa_default
import pisa_parser
import re
import urlparse
import types
from reportlab.platypus.paraparser import ParaParser, ParaFrag, ps2tt, tt2ps, ABag
from reportlab.platypus.paragraph import cleanBlockQuotedText
from reportlab.lib.styles import ParagraphStyle
import reportlab.rl_config
reportlab.rl_config.warnOnMissingFontGlyphs = 0
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.lib.fonts import addMapping
from sx.w3c import css, cssDOMElementInterface
from html5lib.sanitizer import *
import logging
log = logging.getLogger("ho.pisa")
sizeDelta = 2 # amount to reduce font size by for super and sub script
subFraction = 0.4 # fraction of font size that a sub script should be lowered
superFraction = 0.4
NBSP = u"\u00a0"
def clone(self, **kwargs):
n = ParaFrag(**self.__dict__)
if kwargs:
d = n.__dict__
d.update(kwargs)
# This else could cause trouble in Paragraphs with images etc.
if "cbDefn" in d:
del d["cbDefn"]
n.bulletText = None
return n
ParaFrag.clone = clone
def getParaFrag(style):
frag = ParaFrag()
frag.sub = 0
frag.super = 0
frag.rise = 0
frag.underline = 0 # XXX Need to be able to set color to fit CSS tests
frag.strike = 0
frag.greek = 0
frag.link = None
frag.text = ""
# frag.lineBreak = 0
#if bullet:
# frag.fontName, frag.bold, frag.italic = ps2tt(style.bulletFontName)
# frag.fontSize = style.bulletFontSize
# frag.textColor = hasattr(style,'bulletColor') and style.bulletColor or style.textColor
#else:
frag.fontName = "Times-Roman"
frag.fontName, frag.bold, frag.italic = ps2tt(style.fontName)
frag.fontSize = style.fontSize
frag.textColor = style.textColor
# Extras
frag.leading = 0
frag.leadingSource = "150%"
frag.leadingSpace = 0
frag.backColor = None
frag.spaceBefore = 0
frag.spaceAfter = 0
frag.leftIndent = 0
frag.rightIndent = 0
frag.firstLineIndent = 0
frag.keepWithNext = False
frag.alignment = TA_LEFT
frag.vAlign = None
frag.borderWidth = 1
frag.borderStyle = None
frag.borderPadding = 0
frag.borderColor = None
frag.borderLeftWidth = frag.borderWidth
frag.borderLeftColor = frag.borderColor
frag.borderLeftStyle = frag.borderStyle
frag.borderRightWidth = frag.borderWidth
frag.borderRightColor = frag.borderColor
frag.borderRightStyle = frag.borderStyle
frag.borderTopWidth = frag.borderWidth
frag.borderTopColor = frag.borderColor
frag.borderTopStyle = frag.borderStyle
frag.borderBottomWidth = frag.borderWidth
frag.borderBottomColor = frag.borderColor
frag.borderBottomStyle = frag.borderStyle
frag.paddingLeft = 0
frag.paddingRight = 0
frag.paddingTop = 0
frag.paddingBottom = 0
frag.listStyleType = None
frag.listStyleImage = None
frag.whiteSpace = "normal"
frag.pageNumber = False
frag.height = None
frag.width = None
frag.bulletIndent = 0
frag.bulletText = None
frag.bulletFontName = "Helvetica"
frag.zoom = 1.0
frag.outline = False
frag.outlineLevel = 0
frag.outlineOpen = False
frag.keepInFrameMode = "shrink"
#frag.keepInFrameMaxWidth = None
#frag.keepInFrameMaxHeight = None
frag.insideStaticFrame = 0
return frag
def getDirName(path):
if path and not (path.lower().startswith("http:") or path.lower().startswith("https:")):
return os.path.dirname(os.path.abspath(path))
return path
class pisaCSSBuilder(css.CSSBuilder):
def atFontFace(self, declarations):
" Embed fonts "
result = self.ruleset([self.selector('*')], declarations)
# print "@FONT-FACE", declarations, result
try:
data = result[0].values()[0]
names = data["font-family"]
# Font weight
fweight = str(data.get("font-weight", "normal")).lower()
bold = fweight in ("bold", "bolder", "500", "600", "700", "800", "900")
if not bold and fweight <> "normal":
log.warn(self.c.warning("@fontface, unknown value font-weight '%s'", fweight))
# Font style
italic = str(data.get("font-style", "")).lower() in ("italic", "oblique")
src = self.c.getFile(data["src"])
self.c.loadFont(
names,
src,
bold=bold,
italic=italic)
except Exception, e:
log.warn(self.c.warning("@fontface"), exc_info=1)
return {}, {}
def _pisaDimensions(self, data, width, height):
" Calculate dimensions of a box "
# print data, width, height
box = data.get("-pdf-frame-box", [])
# print 123, box
if len(box) == 4:
return [getSize(x) for x in box]
top = getSize(data.get("top", 0), height)
left = getSize(data.get("left", 0), width)
bottom = - getSize(data.get("bottom", 0), height)
right = - getSize(data.get("right", 0), width)
w = getSize(data.get("width", 0), width, default=None)
h = getSize(data.get("height", 0), height, default=None)
#print width, height, top, left, bottom, right, w, h
if "height" in data:
if "bottom" in data:
top = bottom - h
else:
bottom = top + h
if "width" in data:
if "right" in data:
# print right, w
left = right - w
else:
right = left + w
top += getSize(data.get("margin-top", 0), height)
left += getSize(data.get("margin-left", 0), width)
bottom -= getSize(data.get("margin-bottom", 0), height)
right -= getSize(data.get("margin-right", 0), width)
# box = getCoords(left, top, width, height, self.c.pageSize)
# print "BOX", box
# print top, left, w, h
return left, top, right, bottom
def _pisaAddFrame(self, name, data, first=False, border=None, size=(0,0)):
c = self.c
if not name:
name = "-pdf-frame-%d" % c.UID()
x, y, w, h = self._pisaDimensions(data, size[0], size[1])
# print name, x, y, w, h
#if not (w and h):
# return None
if first:
return (
name,
None,
data.get("-pdf-frame-border", border),
x, y, w, h)
return (
name,
data.get("-pdf-frame-content", None),
data.get("-pdf-frame-border", border),
x, y, w, h)
def atPage(self, name, pseudopage, declarations):
try:
c = self.c
data = {}
name = name or "body"
pageBorder = None
if declarations:
result = self.ruleset([self.selector('*')], declarations)
# print "@PAGE", name, pseudopage, declarations, result
if declarations:
data = result[0].values()[0]
pageBorder = data.get("-pdf-frame-border", None)
if c.templateList.has_key(name):
log.warn(self.c.warning("template '%s' has already been defined", name))
if data.has_key("-pdf-page-size"):
c.pageSize = pisa_default.PML_PAGESIZES.get(str(data["-pdf-page-size"]).lower(), c.pageSize)
if data.has_key("size"):
size = data["size"]
# print size, c.pageSize
if type(size) is not types.ListType:
size = [size]
isLandscape = False
sizeList = []
for value in size:
valueStr = str(value).lower()
if type(value) is types.TupleType:
sizeList.append(getSize(value))
elif valueStr == "landscape":
isLandscape = True
elif pisa_default.PML_PAGESIZES.has_key(valueStr):
c.pageSize = pisa_default.PML_PAGESIZES[valueStr]
else:
log.warn(c.warning("Unknown size value for @page"))
if len(sizeList) == 2:
c.pageSize = sizeList
if isLandscape:
c.pageSize = landscape(c.pageSize)
for prop in [
"margin-top",
"margin-left",
"margin-right",
"margin-bottom",
"top",
"left",
"right",
"bottom",
"width",
"height"
]:
if data.has_key(prop):
c.frameList.append(self._pisaAddFrame(name, data, first=True, border=pageBorder, size=c.pageSize))
break
# self._drawing = PmlPageDrawing(self._pagesize)
#if not c.frameList:
# c.warning("missing frame definitions for template")
# return {}, {}
# Frames have to be calculated after we know the pagesize
frameList = []
staticList = []
for fname, static, border, x, y, w, h in c.frameList:
x, y, w, h = getCoords(x, y, w, h, c.pageSize)
if w <= 0 or h <= 0:
log.warn(self.c.warning("Negative width or height of frame. Check @frame definitions."))
frame = Frame(
x, y, w, h,
id=fname,
leftPadding=0,
rightPadding=0,
bottomPadding=0,
topPadding=0,
showBoundary=border or pageBorder)
if static:
frame.pisaStaticStory = []
c.frameStatic[static] = [frame] + c.frameStatic.get(static, [])
staticList.append(frame)
else:
frameList.append(frame)
background = data.get("background-image", None)
if background:
background = self.c.getFile(background)
# print background
# print frameList
if not frameList:
# print 999
log.warn(c.warning("missing explicit frame definition for content or just static frames"))
fname, static, border, x, y, w, h = self._pisaAddFrame(name, data, first=True, border=pageBorder, size=c.pageSize)
x, y, w, h = getCoords(x, y, w, h, c.pageSize)
if w <= 0 or h <= 0:
log.warn(c.warning("Negative width or height of frame. Check @page definitions."))
frameList.append(Frame(
x, y, w, h,
id=fname,
leftPadding=0,
rightPadding=0,
bottomPadding=0,
topPadding=0,
showBoundary=border or pageBorder))
pt = PmlPageTemplate(
id=name,
frames=frameList,
pagesize=c.pageSize,
)
pt.pisaStaticList = staticList
pt.pisaBackground = background
pt.pisaBackgroundList = c.pisaBackgroundList
# self._pagesize)
# pt.pml_statics = self._statics
# pt.pml_draw = self._draw
# pt.pml_drawing = self._drawing
# pt.pml_background = attrs.background
# pt.pml_bgstory = self._bgstory
c.templateList[name] = pt
c.template = None
c.frameList = []
c.frameStaticList = []
except Exception, e:
log.warn(self.c.warning("@page"), exc_info=1)
return {}, {}
def atFrame(self, name, declarations):
if declarations:
result = self.ruleset([self.selector('*')], declarations)
# print "@BOX", name, declarations, result
try:
data = result[0]
if data:
data = data.values()[0]
self.c.frameList.append(
self._pisaAddFrame(
name,
data,
size=self.c.pageSize))
except Exception, e:
log.warn(self.c.warning("@frame"), exc_info=1)
return {}, {}
class pisaCSSParser(css.CSSParser):
def parseExternal(self, cssResourceName):
# print "@import", self.rootPath, cssResourceName
oldRootPath = self.rootPath
cssFile = self.c.getFile(cssResourceName, relative=self.rootPath)
result = []
if not cssFile:
return None
if self.rootPath and (self.rootPath.startswith("http:") or self.rootPath.startswith("https:")):
self.rootPath = urlparse.urljoin(self.rootPath, cssResourceName)
else:
self.rootPath = getDirName(cssFile.uri)
# print "###", self.rootPath
result = self.parse(cssFile.getData())
self.rootPath = oldRootPath
return result
class pisaContext:
"""
Helper class for creation of reportlab story and container for
varoius data.
"""
def __init__(self, path, debug=0, capacity=-1):
self.fontList = copy.copy(pisa_default.DEFAULT_FONT)
self.path = []
self.capacity=capacity
self.node = None
self.toc = PmlTableOfContents()
self.story = []
self.text = []
self.log = []
self.err = 0
self.warn = 0
self.text = u""
self.uidctr = 0
self.multiBuild = False
self.pageSize = A4
self.template = None
self.templateList = {}
self.frameList = []
self.frameStatic = {}
self.frameStaticList = []
self.pisaBackgroundList = []
self.baseFontSize = getSize("12pt")
self.anchorFrag = []
self.anchorName = []
self.tableData = None
self.frag = self.fragBlock = getParaFrag(ParagraphStyle('default%d' % self.UID()))
self.fragList = []
self.fragAnchor = []
self.fragStack = []
self.fragStrip = True
self.listCounter = 0
self.cssText = ""
self.image = None
self.imageData = {}
self.force = False
self.pathCallback = None # External callback function for path calculations
# Store path to document
self.pathDocument = path or "__dummy__"
if not (self.pathDocument.lower().startswith("http:") or self.pathDocument.lower().startswith("https:")):
self.pathDocument = os.path.abspath(self.pathDocument)
self.pathDirectory = getDirName(self.pathDocument)
self.meta = dict(
author="",
title="",
subject="",
keywords="",
pagesize=A4,
)
def UID(self):
self.uidctr += 1
return self.uidctr
# METHODS FOR CSS
def addCSS(self, value):
value = value.strip()
if value.startswith("<![CDATA["):
value = value[9: - 3]
if value.startswith("<!--"):
value = value[4: - 3]
self.cssText += value.strip() + "\n"
def parseCSS(self):
#print repr(self.cssText)
# self.debug(9, self.cssText)
# XXX Must be handled in a better way!
#self.cssText = self.cssText.replace("<!--", "\n")
#self.cssText = self.cssText.replace("-->", "\n")
#self.cssText = self.cssText.replace("<![CDATA[", "\n")
#self.cssText = self.cssText.replace("]]>", "\n")
#self.debug(9, self.cssText)
# print repr(self.cssText)
# file("pisa.css", "wb").write(self.cssText.encode("utf8"))
# self.cssText = re.compile(r"url\((.*?\))", re.M).sub('"\1"', self.cssText)
# self.cssText = re.compile(r"\-moz\-.*?([\;\}]+)", re.M).sub(r"\1", self.cssText)
# XXX Import has to be implemented!
# self.cssText = re.compile(r"\@import.*;", re.M).sub("", self.cssText)
# if 0:
# try:
# # Sanitize CSS
# import cssutils
# import logging
# cssutils.log.setlog(logging.getLogger('csslog'))
# cssutils.log.setloglevel(logging.DEBUG)
# sheet = cssutils.parseString(self.cssText)
# self.cssText = sheet.cssText
# #err = csslog.getvalue()
# except ImportError, e:
# pass
# except Exception, e:
# log.exception(self.error("Error parsing CSS by cssutils"))
# print self.cssText
# file("pisa-sanitized.css", "w").write(self.cssText.encode("utf8"))
# print self.cssText
self.cssBuilder = pisaCSSBuilder(mediumSet=["all", "print", "pdf"])
self.cssBuilder.c = self
self.cssParser = pisaCSSParser(self.cssBuilder)
self.cssParser.rootPath = self.pathDirectory
self.cssParser.c = self
self.css = self.cssParser.parse(self.cssText)
self.cssCascade = css.CSSCascadeStrategy(self.css)
self.cssCascade.parser = self.cssParser
# METHODS FOR STORY
def addStory(self, data):
self.story.append(data)
def swapStory(self, story=[]):
self.story, story = copy.copy(story), copy.copy(self.story)
return story
def toParagraphStyle(self, first):
style = ParagraphStyle('default%d' % self.UID(), keepWithNext=first.keepWithNext)
style.fontName = first.fontName
style.fontSize = first.fontSize
style.leading = max(first.leading + first.leadingSpace, first.fontSize * 1.25)
style.backColor = first.backColor
style.spaceBefore = first.spaceBefore
style.spaceAfter = first.spaceAfter
style.leftIndent = first.leftIndent
style.rightIndent = first.rightIndent
style.firstLineIndent = first.firstLineIndent
style.textColor = first.textColor
style.alignment = first.alignment
style.bulletFontName = first.bulletFontName or first.fontName
style.bulletFontSize = first.fontSize
style.bulletIndent = first.bulletIndent
# Border handling for Paragraph
# Transfer the styles for each side of the border, *not* the whole
# border values that reportlab supports. We'll draw them ourselves in
# PmlParagraph.
style.borderTopStyle = first.borderTopStyle
style.borderTopWidth = first.borderTopWidth
style.borderTopColor = first.borderTopColor
style.borderBottomStyle = first.borderBottomStyle
style.borderBottomWidth = first.borderBottomWidth
style.borderBottomColor = first.borderBottomColor
style.borderLeftStyle = first.borderLeftStyle
style.borderLeftWidth = first.borderLeftWidth
style.borderLeftColor = first.borderLeftColor
style.borderRightStyle = first.borderRightStyle
style.borderRightWidth = first.borderRightWidth
style.borderRightColor = first.borderRightColor
# If no border color is given, the text color is used (XXX Tables!)
if (style.borderTopColor is None) and style.borderTopWidth:
style.borderTopColor = first.textColor
if (style.borderBottomColor is None) and style.borderBottomWidth:
style.borderBottomColor = first.textColor
if (style.borderLeftColor is None) and style.borderLeftWidth:
style.borderLeftColor = first.textColor
if (style.borderRightColor is None) and style.borderRightWidth:
style.borderRightColor = first.textColor
style.borderPadding = first.borderPadding
style.paddingTop = first.paddingTop
style.paddingBottom = first.paddingBottom
style.paddingLeft = first.paddingLeft
style.paddingRight = first.paddingRight
# This is the old code replaced by the above, kept for reference
#style.borderWidth = 0
#if getBorderStyle(first.borderTopStyle):
# style.borderWidth = max(first.borderLeftWidth, first.borderRightWidth, first.borderTopWidth, first.borderBottomWidth)
# style.borderPadding = first.borderPadding # + first.borderWidth
# style.borderColor = first.borderTopColor
# # If no border color is given, the text color is used (XXX Tables!)
# if (style.borderColor is None) and style.borderWidth:
# style.borderColor = first.textColor
style.fontName = tt2ps(first.fontName, first.bold, first.italic)
return style
def addTOC(self):
# style = copy.deepcopy(self.toParagraphStyle(self.fragBlock))
#cssAttrs = copy.deepcopy(self.node.cssAttrs)
#frag = copy.deepcopy(self.frag)
styles = []
for i in range(0, 20):
self.node.attributes["class"] = "pdftoclevel%d" % i
#self.node.cssAttrs = copy.deepcopy(cssAttrs)
#self.frag = copy.deepcopy(frag)
self.cssAttr = pisa_parser.CSSCollect(self.node, self)
pisa_parser.CSS2Frag(self, {
"margin-top": 0,
"margin-bottom": 0,
"margin-left": 0,
"margin-right": 0,
}, True)
pstyle = self.toParagraphStyle(self.frag)
#styles.append(copy.deepcopy(pstyle))
styles.append(pstyle)
# log.warn("%r", self.fragBlock.textColor)
self.toc.levelStyles = styles
self.addStory(self.toc)
    def dumpPara(self, frags, style):
        # Debugging helper; the early return below keeps the dump disabled.
        return
        print "%s/%s %s *** PARA" % (style.fontSize, style.leading, style.fontName)
for frag in frags:
print "%s/%s %r %r" % (
frag.fontSize,
frag.leading,
getattr(frag, "cbDefn", None),
frag.text)
print
def addPara(self, force=False):
# print self.force, repr(self.text)
force = (force or self.force)
self.force = False
# Cleanup the trail
try:
rfragList = reversed(self.fragList)
except:
# For Python 2.3 compatibility
rfragList = copy.copy(self.fragList)
rfragList.reverse()
#for frag in rfragList:
# frag.text = frag.text.rstrip()
# if frag.text:
# break
# Find maximum lead
maxLeading = 0
#fontSize = 0
for frag in self.fragList:
leading = getSize(frag.leadingSource, frag.fontSize) + frag.leadingSpace
maxLeading = max(leading, frag.fontSize + frag.leadingSpace, maxLeading)
frag.leading = leading
if force or (self.text.strip() and self.fragList):
# Strip trailing whitespaces
#for f in self.fragList:
# f.text = f.text.lstrip()
# if f.text:
# break
#self.fragList[-1].lineBreak = self.fragList[-1].text.rstrip()
# Update paragraph style by style of first fragment
first = self.fragBlock
style = self.toParagraphStyle(first)
# style.leading = first.leading + first.leadingSpace
if first.leadingSpace:
style.leading = maxLeading
else:
style.leading = getSize(first.leadingSource, first.fontSize) + first.leadingSpace
# style.leading = maxLeading # + first.leadingSpace
#style.fontSize = fontSize
# borderRadius: None,
# print repr(self.text.strip()), style.leading, "".join([repr(x.text) for x in self.fragList])
# print first.leftIndent, first.listStyleType,repr(self.text)
bulletText = copy.copy(first.bulletText)
first.bulletText = None
# Add paragraph to story
if force or len(self.fragAnchor + self.fragList) > 0:
# We need this empty fragment to work around problems in
# Reportlab paragraphs regarding backGround etc.
if self.fragList:
self.fragList.append(self.fragList[ - 1].clone(text=''))
else:
blank = self.frag.clone()
blank.fontName = "Helvetica"
blank.text = ''
self.fragList.append(blank)
self.dumpPara(self.fragAnchor + self.fragList, style)
para = PmlParagraph(
self.text,
style,
frags=self.fragAnchor + self.fragList,
bulletText=bulletText)
# Mirrored and BIDI
#import unicodedata
#for c in self.text:
# print unicodedata.bidirectional(c),
para.outline = first.outline
para.outlineLevel = first.outlineLevel
para.outlineOpen = first.outlineOpen
para.keepWithNext = first.keepWithNext
para.autoLeading = "max"
if self.image:
para = PmlParagraphAndImage(
para,
self.image,
side=self.imageData.get("align", "left"))
self.addStory(para)
self.fragAnchor = []
first.bulletText = None
# Reset data
self.image = None
self.imageData = {}
self.clearFrag()
# METHODS FOR FRAG
def clearFrag(self):
self.fragList = []
self.fragStrip = True
self.text = u""
def copyFrag(self, **kw):
return self.frag.clone(**kw)
def newFrag(self, **kw):
self.frag = self.frag.clone(**kw)
return self.frag
def _appendFrag(self, frag):
if frag.link and frag.link.startswith("#"):
self.anchorFrag.append((frag, frag.link[1:]))
self.fragList.append(frag)
# XXX Argument frag is useless!
def addFrag(self, text="", frag=None):
frag = baseFrag = self.frag.clone()
# if sub and super are both on they will cancel each other out
if frag.sub == 1 and frag.super == 1:
frag.sub = 0
frag.super = 0
# XXX Has to be replaced by CSS styles like vertical-align and font-size
if frag.sub:
frag.rise = - frag.fontSize * subFraction
frag.fontSize = max(frag.fontSize - sizeDelta, 3)
elif frag.super:
frag.rise = frag.fontSize * superFraction
frag.fontSize = max(frag.fontSize - sizeDelta, 3)
# XXX Unused?
#if frag.greek:
# frag.fontName = 'symbol'
# text = _greekConvert(text)
# bold, italic, and underline
frag.fontName = frag.bulletFontName = tt2ps(frag.fontName, frag.bold, frag.italic)
# print frag.bulletFontName
# Modify text for optimal whitespace handling
# XXX Support Unicode whitespaces?
# XXX What about images?
# XXX Doesn't work with Reportlab > 2.1
# NBSP = '\xc2\xa0' # u"_"
#if REPORTLAB22:
# NBSP = u" "
# Replace ­ with empty and normalize NBSP
text = (text
.replace(u"\xad", u"")
.replace(u"\xc2\xa0", NBSP)
.replace(u"\xa0", NBSP))
# log.debug("> %r", text)
if frag.whiteSpace == "pre":
# Handle by lines
for text in re.split(r'(\r\n|\n|\r)', text):
# This is an exceptionally expensive piece of code
self.text += text
if ("\n" in text) or ("\r" in text):
# If EOL insert a linebreak
frag = baseFrag.clone()
frag.text = ""
frag.lineBreak = 1
self._appendFrag(frag)
else:
# Handle tabs in a simple way
text = text.replace(u"\t", 8 * u" ")
# Somehow for Reportlab NBSP have to be inserted
# as single character fragments
for text in re.split(r'(\ )', text):
frag = baseFrag.clone()
if text == " ":
text = NBSP
frag.text = text
self._appendFrag(frag)
else:
for text in re.split(u'(' + NBSP + u')', text):
frag = baseFrag.clone()
if text == NBSP:
self.force = True
frag.text = NBSP
self.text += text
self._appendFrag(frag)
else:
frag.text = " ".join(("x" + text + "x").split())[1: - 1]
if self.fragStrip:
frag.text = frag.text.lstrip()
if frag.text:
self.fragStrip = False
self.text += frag.text
self._appendFrag(frag)
# print frag.fontName, repr(frag.text), frag.bulletText
def pushFrag(self):
self.fragStack.append(self.frag)
self.newFrag()
def pullFrag(self):
self.frag = self.fragStack.pop()
# XXX
def _getFragment(self, l=20):
try:
return repr(" ".join(self.node.toxml().split()[:l]))
except:
return ""
def _getLineNumber(self):
return 0
def context(self, msg):
return "%s\n%s" % (
str(msg),
self._getFragment(50))
# return "line %s: %s\n%s" % (
# self._getLineNumber(),
# str(msg),
# self._getFragment(50))
def warning(self, msg, *args):
self.warn += 1
self.log.append((pisa_default.PML_WARNING, self._getLineNumber(), str(msg), self._getFragment(50)))
try:
return self.context(msg % args)
except:
return self.context(msg)
def error(self, msg, *args):
self.err += 1
self.log.append((pisa_default.PML_ERROR, self._getLineNumber(), str(msg), self._getFragment(50)))
try:
return self.context(msg % args)
except:
return self.context(msg)
# UTILS
def _getFileDeprecated(self, name, relative):
try:
if name.startswith("data:"):
return name
path = relative or self.pathDirectory
if self.pathCallback is not None:
nv = self.pathCallback(name, relative)
else:
if path is None:
log.warn("Could not find main directory for getting filename. Use CWD")
path = os.getcwd()
nv = os.path.normpath(os.path.join(path, name))
if not(nv and os.path.isfile(nv)):
nv = None
if nv is None:
log.warn(self.warning("File '%s' does not exist", name))
return nv
except:
log.warn(self.warning("getFile %r %r %r", name, relative, path), exc_info=1)
def getFile(self, name, relative=None):
"""
Returns a file name or None
"""
if self.pathCallback is not None:
return getFile(self._getFileDeprecated(name, relative))
return getFile(name, relative or self.pathDirectory)
def getFontName(self, names, default="helvetica"):
"""
Name of a font
"""
# print names, self.fontList
if type(names) is not types.ListType:
names = str(names).strip().split(",")
for name in names:
font = self.fontList.get(str(name).strip().lower(), None)
if font is not None:
return font
return self.fontList.get(default, None)
def registerFont(self, fontname, alias=[]):
self.fontList[str(fontname).lower()] = str(fontname)
for a in alias:
self.fontList[str(a)] = str(fontname)
def loadFont(self, names, src, encoding="WinAnsiEncoding", bold=0, italic=0):
# XXX Just works for local filenames!
if names and src: # and src.local:
file = src
src = file.uri
log.debug("Load font %r", src)
if type(names) is types.ListType:
fontAlias = names
else:
fontAlias = [x.lower().strip() for x in names.split(",") if x]
# XXX Problems with unicode here
fontAlias = [str(x) for x in fontAlias]
fontName = fontAlias[0]
parts = src.split(".")
baseName, suffix = ".".join(parts[: - 1]), parts[ - 1]
suffix = suffix.lower()
try:
if suffix == "ttf":
# determine full font name according to weight and style
fullFontName = "%s_%d%d" % (fontName, bold, italic)
# check if font has already been registered
if fullFontName in self.fontList:
log.warn(self.warning("Repeated font embed for %s, skip new embed ", fullFontName))
else:
# Register TTF font and special name
filename = file.getNamedFile()
pdfmetrics.registerFont(TTFont(fullFontName, filename))
# Add or replace missing styles
for bold in (0, 1):
for italic in (0, 1):
if ("%s_%d%d" % (fontName, bold, italic)) not in self.fontList:
addMapping(fontName, bold, italic, fullFontName)
# Register "normal" name and the place holder for style
self.registerFont(fontName, fontAlias + [fullFontName])
elif suffix in ("afm", "pfb"):
if suffix == "afm":
afm = file.getNamedFile()
tfile = pisaFileObject(baseName + ".pfb")
pfb = tfile.getNamedFile()
else:
pfb = file.getNamedFile()
tfile = pisaFileObject(baseName + ".afm")
afm = tfile.getNamedFile()
#afm = baseName + ".afm"
#pfb = baseName + ".pfb"
# determine full font name according to weight and style
fullFontName = "%s_%d%d" % (fontName, bold, italic)
#fontNameOriginal = ""
#for line in open(afm).readlines()[:-1]:
# if line[:16] == 'StartCharMetrics':
# self.error("Font name not found")
# if line[:8] == 'FontName':
# fontNameOriginal = line[9:].strip()
# break
# check if font has already been registered
if fullFontName in self.fontList:
log.warn(self.warning("Repeated font embed for %s, skip new embed", fontName))
else:
# Include font
face = pdfmetrics.EmbeddedType1Face(afm, pfb)
fontNameOriginal = face.name
pdfmetrics.registerTypeFace(face)
# print fontName, fontNameOriginal, fullFontName
justFont = pdfmetrics.Font(fullFontName, fontNameOriginal, encoding)
pdfmetrics.registerFont(justFont)
# Add or replace missing styles
for bold in (0, 1):
for italic in (0, 1):
if ("%s_%d%d" % (fontName, bold, italic)) not in self.fontList:
addMapping(fontName, bold, italic, fontNameOriginal)
# Register "normal" name and the place holder for style
self.registerFont(fontName, fontAlias + [fullFontName, fontNameOriginal])
#import pprint
#pprint.pprint(self.fontList)
else:
log.warning(self.warning("wrong attributes for <pdf:font>"))
except Exception:
log.warn(self.warning("Loading font '%s'", fontName), exc_info=1)
| avg_line_length: 38.091423 | max_line_length: 141 | alphanum_fraction: 0.503006 |

hexsha: 794e8eb943228ba371e13c8f18e4b6b873714242 | size: 365 | ext: py | lang: Python
max_stars:  setup.py | sanmedina/Top-Lifetime-Grosses-scrapper | e8245719f9e204b31e17746c96657b962c901ce7 | ["MIT"] | count: null | null .. null
max_issues: setup.py | sanmedina/Top-Lifetime-Grosses-scrapper | e8245719f9e204b31e17746c96657b962c901ce7 | ["MIT"] | count: null | null .. null
max_forks:  setup.py | sanmedina/Top-Lifetime-Grosses-scrapper | e8245719f9e204b31e17746c96657b962c901ce7 | ["MIT"] | count: null | null .. null
from setuptools import setup
setup(
name="Top-Lifetime-Grosses-scrapper",
version="1.0.0",
packages=["src"],
install_requires=[
"beautifulsoup4",
"lxml",
"requests",
"black",
"isort",
"pre-commit",
],
url="",
license="",
author="sanmedina",
author_email="",
description="",
)
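
# ---------------------------------------------------------------------------
# Illustrative note (not part of the original file): a typical local install
# of this package would be
#
#   pip install .        # or: pip install -e .  for development
#
# which pulls in the scraping dependencies (beautifulsoup4, lxml, requests)
# and the tooling dependencies (black, isort, pre-commit) listed above.
# ---------------------------------------------------------------------------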
| avg_line_length: 17.380952 | max_line_length: 41 | alphanum_fraction: 0.520548 |

hexsha: 794e8ed4e8cae3c78b585cbaf78269d71550e58b | size: 444 | ext: py | lang: Python
max_stars:  date_models/examine_dataset.py | RossDeVito/news-tsl-cse291 | 7b9c2fe2feb6da673bd1bd73684cd841bff07496 | ["MIT"] | count: null | null .. null
max_issues: date_models/examine_dataset.py | RossDeVito/news-tsl-cse291 | 7b9c2fe2feb6da673bd1bd73684cd841bff07496 | ["MIT"] | count: 6 | 2021-04-06T18:22:56.000Z .. 2022-03-12T00:52:25.000Z
max_forks:  date_models/examine_dataset.py | RossDeVito/news-tsl-cse291 | 7b9c2fe2feb6da673bd1bd73684cd841bff07496 | ["MIT"] | count: null | null .. null
'''
Short script used to examine the format of the data in a news timeline dataset
'''
import os
import pandas as pd
from date_models.model_utils import inspect
def main():
input_file = '../datasets/t17/bpoil/articles.preprocessed_mod.jsonl'
df = pd.read_json(input_file)
inspect(df)
if __name__ == '__main__':
print('Starting: ', os.path.basename(__file__))
main()
print('Completed: ', os.path.basename(__file__))
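
# ---------------------------------------------------------------------------
# Illustrative note (not part of the original file): given the package-style
# import of date_models.model_utils and the '../datasets/...' input path, the
# script presumably expects the repository root on sys.path, e.g. something
# like `python -m date_models.examine_dataset`; the exact working directory
# depends on where '../datasets' is meant to resolve, so treat this as a guess.
# ---------------------------------------------------------------------------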
| avg_line_length: 22.2 | max_line_length: 78 | alphanum_fraction: 0.707207 |

hexsha: 794e8fb691d5cab797618210ea056ee76d840476 | size: 28070 | ext: py | lang: Python
max_stars:  test/functional/rpc_rawtransaction.py | VeriBlock/pop-bch | fd816fa9e173f944ce468820bfde8ee7a71edc96 | ["MIT"] | count: 1 | 2021-09-08T14:27:00.000Z .. 2021-09-08T14:27:00.000Z
max_issues: test/functional/rpc_rawtransaction.py | cculianu/bitcoin-abc | 007128a7f610936c20e060a4f3a2afe623d453b4 | ["MIT"] | count: 1 | 2022-03-09T14:32:59.000Z .. 2022-03-09T14:32:59.000Z
max_forks:  test/functional/rpc_rawtransaction.py | VeriBlock/pop-bch | fd816fa9e173f944ce468820bfde8ee7a71edc96 | ["MIT"] | count: 1 | 2019-10-28T13:39:39.000Z .. 2019-10-28T13:39:39.000Z
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtranscation RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from decimal import Decimal
from collections import OrderedDict
from io import BytesIO
from test_framework.messages import (
COutPoint,
CTransaction,
CTxIn,
CTxOut,
ToHex,
)
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.txtools import pad_raw_tx
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
connect_nodes,
hex_str_to_bytes,
)
class multidict(dict):
"""Dictionary that allows duplicate keys.
Constructed with a list of (key, value) tuples. When dumped by the json module,
    will output invalid json with repeated keys, e.g.:
    >>> json.dumps(multidict([(1,2),(1,2)]))
    '{"1": 2, "1": 2}'
Used to test calls to rpc methods with repeated keys in the json object."""
def __init__(self, x):
dict.__init__(self, x)
self.x = x
def items(self):
return self.x
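
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): as the class docstring
# shows, serialising a multidict emits the repeated keys verbatim, which a
# spec-compliant JSON parser would otherwise collapse.
#
#   import json
#   json.dumps(multidict([("txid", "aa"), ("txid", "bb")]))
#   # -> '{"txid": "aa", "txid": "bb"}'
# ---------------------------------------------------------------------------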
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-txindex"], ["-txindex"], ["-txindex"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
super().setup_network()
connect_nodes(self.nodes[0], self.nodes[2])
def run_test(self):
self.log.info(
'prepare some coins for multiple *rawtransaction commands')
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
self.log.info(
'Test getrawtransaction on genesis block coinbase returns an error')
block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction",
self.nodes[0].getrawtransaction, block['merkleroot'])
self.log.info(
'Check parameter types and required parameters of createrawtransaction')
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction",
self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction",
self.nodes[0].createrawtransaction, [])
# Test `createrawtransaction` invalid extra parameters
assert_raises_rpc_error(-1, "createrawtransaction",
self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
# Test `createrawtransaction` invalid `inputs`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array",
self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected",
self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-1,
"JSON value is not a string as expected",
self.nodes[0].createrawtransaction,
[{}],
{})
assert_raises_rpc_error(-8,
"txid must be of length 64 (not 3, for 'foo')",
self.nodes[0].createrawtransaction,
[{'txid': 'foo'}],
{})
assert_raises_rpc_error(-8,
"txid must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')",
self.nodes[0].createrawtransaction,
[{'txid': 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844'}],
{})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key",
self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout must be a number",
self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive",
self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range",
self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
address2 = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-1, "JSON value is not an array as expected",
self.nodes[0].createrawtransaction, [], 'foo')
# Should not throw for backwards compatibility
self.nodes[0].createrawtransaction(inputs=[], outputs={})
self.nodes[0].createrawtransaction(inputs=[], outputs=[])
assert_raises_rpc_error(-8, "Data must be hexadecimal string",
self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid Bitcoin address",
self.nodes[0].createrawtransaction, [], {'foo': 0})
assert_raises_rpc_error(-3, "Invalid amount",
self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range",
self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: {}".format(
address), self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: {}".format(
address), self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}])
assert_raises_rpc_error(-8,
"Invalid parameter, duplicate key: data",
self.nodes[0].createrawtransaction,
[],
[{"data": 'aa'},
{"data": "bb"}])
assert_raises_rpc_error(-8,
"Invalid parameter, duplicate key: data",
self.nodes[0].createrawtransaction,
[],
multidict([("data",
'aa'),
("data",
"bb")]))
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key",
self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}])
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected",
self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']])
# Test `createrawtransaction` invalid `locktime`
assert_raises_rpc_error(-3, "Expected type number",
self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range",
self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range",
self.nodes[0].createrawtransaction, [], {}, 4294967296)
self.log.info(
'Check that createrawtransaction accepts an array and object as outputs')
tx = CTransaction()
# One output
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(
inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99}))))
assert_equal(len(tx.vout), 1)
assert_equal(
tx.serialize().hex(),
self.nodes[2].createrawtransaction(
inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]),
)
# Two outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[
{'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))))
assert_equal(len(tx.vout), 2)
assert_equal(
tx.serialize().hex(),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[
{address: 99}, {address2: 99}]),
)
# Multiple mixed outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[
{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')])))))
assert_equal(len(tx.vout), 3)
assert_equal(
tx.serialize().hex(),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[
{address: 99}, {address2: 99}, {'data': '99'}]),
)
self.log.info('sendrawtransaction with missing input')
        # this input won't exist
inputs = [
{'txid': "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout': 1}]
outputs = {self.nodes[0].getnewaddress(): 4.998}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = pad_raw_tx(rawtx)
rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx)
# This will raise an exception since there are missing inputs
assert_raises_rpc_error(-25,
"bad-txns-inputs-missingorspent",
self.nodes[2].sendrawtransaction,
rawtx['hex'])
#####################################
# getrawtransaction with block hash #
#####################################
        # Make a tx by sending, then generate 2 blocks; block1 has the tx in it
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
# We should be able to get the raw transaction by providing the correct
# block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
# We should not have the 'in_active_chain' flag when we don't provide a
# block
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
assert 'in_active_chain' not in gottx
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found",
self.nodes[0].getrawtransaction, tx, True, block2)
# An invalid block hash should raise the correct errors
assert_raises_rpc_error(-1,
"JSON value is not a string as expected",
self.nodes[0].getrawtransaction,
tx,
True,
True)
assert_raises_rpc_error(-8,
"parameter 3 must be of length 64 (not 6, for 'foobar')",
self.nodes[0].getrawtransaction,
tx,
True,
"foobar")
assert_raises_rpc_error(-8,
"parameter 3 must be of length 64 (not 8, for 'abcd1234')",
self.nodes[0].getrawtransaction,
tx,
True,
"abcd1234")
assert_raises_rpc_error(
-8,
"parameter 3 must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')",
self.nodes[0].getrawtransaction,
tx,
True,
"ZZZ0000000000000000000000000000000000000000000000000000000000000")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction,
tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
# Undo the blocks and check in_active_chain
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(
txid=tx, verbose=True, blockhash=block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
#
# RAW TX MULTISIG TESTS #
#
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
# Tests for createmultisig and addmultisigaddress
assert_raises_rpc_error(-5, "Invalid public key",
self.nodes[0].createmultisig, 1, ["01020304"])
# createmultisig can only take public keys
self.nodes[0].createmultisig(
2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# addmultisigaddress can take both pubkeys and addresses so long as
# they are in the wallet, which is tested here.
assert_raises_rpc_error(-5, "Invalid public key",
self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1])
mSigObj = self.nodes[2].addmultisigaddress(
2, [addr1Obj['pubkey'], addr1])['address']
# use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BCH to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
# node2 has both keys of the 2of2 ms addr., tx should affect the
# balance
assert_equal(self.nodes[2].getbalance(), bal + Decimal('1.20000000'))
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
addr3Obj = self.nodes[2].getaddressinfo(addr3)
mSigObj = self.nodes[2].addmultisigaddress(
2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
        # THIS IS AN INCOMPLETE FEATURE
        # NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND
        # COUNT TOWARDS THE BALANCE CALCULATION
# for now, assume the funds of a 2of3 multisig tx are not marked as
# spendable
assert_equal(self.nodes[2].getbalance(), bal)
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = next(o for o in rawTx['vout']
if o['value'] == Decimal('2.20000000'))
bal = self.nodes[0].getbalance()
inputs = [{
"txid": txId,
"vout": vout['n'],
"scriptPubKey": vout['scriptPubKey']['hex'],
"amount": vout['value'],
}]
outputs = {self.nodes[0].getnewaddress(): 2.19}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(
rawTx, inputs)
        # node1 only has one key, so it can't completely sign the tx
assert_equal(rawTxPartialSigned['complete'], False)
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
        # node2 owns two of the three keys, so it can completely sign the tx
assert_equal(rawTxSigned['complete'], True)
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal + Decimal(
'50.00000000') + Decimal('2.19000000')) # block reward + tx
rawTxBlock = self.nodes[0].getblock(self.nodes[0].getbestblockhash())
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
self.nodes[1].addmultisigaddress(
2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObj = self.nodes[2].addmultisigaddress(
2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
# the funds of a 2of2 multisig tx should not be marked as spendable
assert_equal(self.nodes[2].getbalance(), bal)
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = next(o for o in rawTx2['vout']
if o['value'] == Decimal('2.20000000'))
bal = self.nodes[0].getbalance()
inputs = [{"txid": txId, "vout": vout['n'], "scriptPubKey": vout['scriptPubKey']
['hex'], "redeemScript": mSigObjValid['hex'], "amount": vout['value']}]
outputs = {self.nodes[0].getnewaddress(): 2.19}
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(
rawTx2, inputs)
self.log.debug(rawTxPartialSigned1)
        # node1 only has one key, so it can't completely sign the tx
assert_equal(rawTxPartialSigned1['complete'], False)
rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(
rawTx2, inputs)
self.log.debug(rawTxPartialSigned2)
        # node2 only has one key, so it can't completely sign the tx
assert_equal(rawTxPartialSigned2['complete'], False)
rawTxComb = self.nodes[2].combinerawtransaction(
[rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
self.log.debug(rawTxComb)
self.nodes[2].sendrawtransaction(rawTxComb)
rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(
), bal + Decimal('50.00000000') + Decimal('2.19000000')) # block reward + tx
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["hash"]
assert_equal(
self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(
self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(
txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to
# update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(
txHash, 1)["hex"], rawTxSigned['hex'])
        # 5. valid parameters - supply txid and True for verbose
assert_equal(self.nodes[0].getrawtransaction(
txHash, True)["hex"], rawTxSigned['hex'])
        # 6. invalid parameters - supply txid and the string "False"
assert_raises_rpc_error(
-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "False")
# 7. invalid parameters - supply txid and empty array
assert_raises_rpc_error(
-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises_rpc_error(
-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {})
# Sanity checks on verbose getrawtransaction output
rawTxOutput = self.nodes[0].getrawtransaction(txHash, True)
assert_equal(rawTxOutput["hex"], rawTxSigned["hex"])
assert_equal(rawTxOutput["txid"], txHash)
assert_equal(rawTxOutput["hash"], txHash)
assert_greater_than(rawTxOutput["size"], 300)
assert_equal(rawTxOutput["version"], 0x02)
assert_equal(rawTxOutput["locktime"], 0)
assert_equal(len(rawTxOutput["vin"]), 1)
assert_equal(len(rawTxOutput["vout"]), 1)
assert_equal(rawTxOutput["blockhash"], rawTxBlock["hash"])
assert_equal(rawTxOutput["confirmations"], 3)
assert_equal(rawTxOutput["time"], rawTxBlock["time"])
assert_equal(rawTxOutput["blocktime"], rawTxBlock["time"])
inputs = [
{'txid': "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'sequence': 1000}]
outputs = {self.nodes[0].getnewaddress(): 1}
assert_raises_rpc_error(
-8, 'Invalid parameter, missing vout key',
self.nodes[0].createrawtransaction, inputs, outputs)
inputs[0]['vout'] = "1"
assert_raises_rpc_error(
-8, 'Invalid parameter, vout must be a number',
self.nodes[0].createrawtransaction, inputs, outputs)
inputs[0]['vout'] = -1
assert_raises_rpc_error(
-8, 'Invalid parameter, vout must be positive',
self.nodes[0].createrawtransaction, inputs, outputs)
inputs[0]['vout'] = 1
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
# 9. invalid parameters - sequence number out of range
inputs[0]['sequence'] = -1
assert_raises_rpc_error(
-8, 'Invalid parameter, sequence number is out of range',
self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs[0]['sequence'] = 4294967296
assert_raises_rpc_error(
-8, 'Invalid parameter, sequence number is out of range',
self.nodes[0].createrawtransaction, inputs, outputs)
inputs[0]['sequence'] = 4294967294
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
####################################
# TRANSACTION VERSION NUMBER TESTS #
####################################
# Test the minimum transaction version number that fits in a signed
# 32-bit integer.
tx = CTransaction()
tx.nVersion = -0x80000000
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], -0x80000000)
# Test the maximum transaction version number that fits in a signed
# 32-bit integer.
tx = CTransaction()
tx.nVersion = 0x7fffffff
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], 0x7fffffff)
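        # (Editor's note: -0x80000000 == -2**31 == -2147483648 and
        # 0x7fffffff == 2**31 - 1 == 2147483647, the exact bounds of a
        # signed 32-bit integer.)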
self.log.info('sendrawtransaction/testmempoolaccept with maxfeerate')
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
rawTx = self.nodes[0].getrawtransaction(txId, True)
vout = next(o for o in rawTx['vout']
if o['value'] == Decimal('1.00000000'))
self.sync_all()
inputs = [{"txid": txId, "vout": vout['n']}]
# 1000 sat fee
outputs = {self.nodes[0].getnewaddress(): Decimal("0.99999000")}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx)
assert_equal(rawTxSigned['complete'], True)
# 1000 sat fee, ~200 b transaction, fee rate should land around 5 sat/b = 0.00005000 BTC/kB
# Thus, testmempoolaccept should reject
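        # (Worked out, editor's note: a 0.00001000 BTC/kB cap is 1 sat/B,
        # well below the ~5 sat/B this transaction pays, so the real fee
        # exceeds maxfeerate; the 0.00007000 BTC/kB cap used later is
        # 7 sat/B, which passes.)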
testres = self.nodes[2].testmempoolaccept(
[rawTxSigned['hex']], 0.00001000)[0]
assert_equal(testres['allowed'], False)
assert_equal(testres['reject-reason'], '256: absurdly-high-fee')
# and sendrawtransaction should throw
assert_raises_rpc_error(-26,
"absurdly-high-fee",
self.nodes[2].sendrawtransaction,
rawTxSigned['hex'],
0.00001000)
        # Both calls below should succeed
testres = self.nodes[2].testmempoolaccept(
rawtxs=[rawTxSigned['hex']], maxfeerate=0.00007000)[0]
assert_equal(testres['allowed'], True)
self.nodes[2].sendrawtransaction(
hexstring=rawTxSigned['hex'],
maxfeerate=0.00007000)
##########################################
# Decoding weird scripts in transactions #
##########################################
self.log.info('Decode correctly-formatted but weird transactions')
tx = CTransaction()
# empty
self.nodes[0].decoderawtransaction(ToHex(tx))
# truncated push
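        # (Editor's note: 0x4e is OP_PUSHDATA4, whose four length bytes are
        # cut short here, and 0x4c is OP_PUSHDATA1 announcing 0x10 = 16 bytes
        # of data while only the 5 bytes of b'TRUNC' follow.)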
tx.vin.append(CTxIn(COutPoint(42, 0), b'\x4e\x00\x00'))
tx.vin.append(CTxIn(COutPoint(42, 0), b'\x4c\x10TRUNC'))
tx.vout.append(CTxOut(0, b'\x4e\x00\x00'))
tx.vout.append(CTxOut(0, b'\x4c\x10TRUNC'))
self.nodes[0].decoderawtransaction(ToHex(tx))
# giant pushes and long scripts
tx.vin.append(
CTxIn(COutPoint(42, 0), CScript([b'giant push' * 10000])))
tx.vout.append(CTxOut(0, CScript([b'giant push' * 10000])))
self.nodes[0].decoderawtransaction(ToHex(tx))
self.log.info('Refuse garbage after transaction')
assert_raises_rpc_error(-22, 'TX decode failed',
self.nodes[0].decoderawtransaction, ToHex(tx) + '00')
if __name__ == '__main__':
RawTransactionsTest().main()
| 46.627907
| 139
| 0.585358
|
794e9056d41f66b5a9af93f43724bff85ce699fe
| 3,203
|
py
|
Python
|
jithbot.py
|
Jithra/JithBot
|
e99679fde3a74d956c40db15eca91223a401ad2e
|
[
"MIT"
] | null | null | null |
jithbot.py
|
Jithra/JithBot
|
e99679fde3a74d956c40db15eca91223a401ad2e
|
[
"MIT"
] | null | null | null |
jithbot.py
|
Jithra/JithBot
|
e99679fde3a74d956c40db15eca91223a401ad2e
|
[
"MIT"
] | null | null | null |
from PluginManager import PluginManager
import discord
import traceback
print("Starting JithBot")
print("Starting Discord Client")
# Creates a discord client, which we will use to connect and interact with the server.
# All methods with @client.event annotations are event handlers for this client.
client = discord.Client()
print("Loading plugins")
# Loads and initializes the plugin manager for the bot
pm = PluginManager("plugins", client)
pm.load_plugins()
pm.register_events()
print("Plugins loaded and registered")
@client.event
async def on_ready():
"""
Event handler, fires when the bot has connected and is logged in
"""
print('Logged in as ' + client.user.name + " (" + client.user.id + ")")
# Change nickname to nickname in configuration
for instance in client.servers:
await client.change_nickname(instance.me, pm.botPreferences.nickName)
await client.change_presence(game=discord.Game(name='Evolving into a sentient being', type = 0))
@client.event
async def on_message(message):
"""
Event handler, fires when a message is received in the server.
:param message: discord.Message object containing the received message
"""
try:
if message.content.startswith(pm.botPreferences.commandPrefix):
# Send the received message off to the Plugin Manager to handle the command
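            # Illustration (editor's note): with commandPrefix "!" and the
            # message "!ping hello world", partition(' ') gives
            # ('!ping', ' ', 'hello world'), so the command forwarded below is
            # 'ping' and words[1:] carries the separator and argument text.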
words = message.content.partition(' ')
await pm.handle_command(message, words[0][len(pm.botPreferences.commandPrefix):], words[1:])
elif message.server is not None:
await pm.handle_message(message)
except Exception as e:
await client.send_message(message.channel, "Error: " + str(e))
if pm.botPreferences.get_config_value("client", "debug") == "1":
traceback.print_exc()
@client.event
async def on_typing(channel, user, when):
"""
Event handler, fires when a user is typing in a channel
:param channel: discord.Channel object containing channel information
:param user: discord.Member object containing the user information
:param when: datetime timestamp
"""
try:
await pm.handle_typing(channel, user, when)
except Exception as e:
await client.send_message(channel, "Error: " + str(e))
if pm.botPreferences.get_config_value("client", "debug") == "1":
traceback.print_exc()
@client.event
async def on_message_delete(message):
"""
Event handler, fires when a message is deleted
:param message: discord.Message object containing the deleted message
"""
try:
if message.author.name != "PluginBot":
await pm.handle_message_delete(message)
except Exception as e:
await client.send_message(message.channel, "Error: " + str(e))
if pm.botPreferences.get_config_value("client", "debug") == "1":
traceback.print_exc()
@client.event
async def on_member_join(member):
await pm.handle_member_join(member)
@client.event
async def on_member_remove(member):
await pm.handle_member_leave(member)
# Run the client and login with the bot token (yes, this needs to be down here)
client.run(pm.botPreferences.token)
| 33.364583
| 104
| 0.700905
|
794e90b25d141c582eae5b0f89ea2b5927153f7d
| 3,782
|
py
|
Python
|
P1/Parte3.py
|
HerouFenix/PraticasIA
|
37f829b53e2eb2986468e69de297699fcd2a8462
|
[
"MIT"
] | 2
|
2019-10-16T13:28:59.000Z
|
2019-11-13T13:21:51.000Z
|
P1/Parte3.py
|
HerouFenix/PraticasIA
|
37f829b53e2eb2986468e69de297699fcd2a8462
|
[
"MIT"
] | null | null | null |
P1/Parte3.py
|
HerouFenix/PraticasIA
|
37f829b53e2eb2986468e69de297699fcd2a8462
|
[
"MIT"
] | 2
|
2019-10-30T13:33:56.000Z
|
2019-11-25T14:36:28.000Z
|
#Functions that return None
#1. Given a list, return the element at its head (i.e., at position 0).
def get_head(lista): #( ͡° ͜ʖ ͡°)
    if len(lista) == 0: #Empty-list case
        return None
    return lista[0]
#2. Given a list, return its tail (i.e., all elements except the first).
def get_tail(lista):
    if len(lista) == 0: #Empty-list case
        return None
    return lista[1:]
#3. Given a pair of lists of equal length, produce a list of the pairs of homologous elements. (E.g. [1,2,3], [4,5,6] -> [(1,4),(2,5),(3,6)])
def join_homologos(list1,list2):
    if len(list1) != len(list2): #If their lengths differ, R.I.P.
        return None
    if len(list1) == 0: #If one of them has length 0 (so does the other) we hit the base case (return an empty list)
        return []
    return [(list1[0],list2[0])] + join_homologos(list1[1:],list2[1:]) #Pair the heads of the two lists and append the result of the next recursive call
#4. Given a list of numbers, return the smallest element.
def get_smallest(lista):
    if len(lista) == 0: #Empty-list case
        return None
    return min(lista)
#5. Given a list of numbers, return a pair formed by the smallest element and the list of the remaining elements.
def get_smallest_and_rest(lista):
    if len(lista) == 0: #Empty-list case
        return None
    smallest = min(lista)
    return [smallest, [element for element in lista if element != smallest] ] #Used List Comprehension :O (note: this drops every occurrence of the minimum)
#6. Given a list of numbers, compute the maximum and the minimum, returning them in a tuple.
def get_smallest_and_biggest(lista):
    if len(lista) == 0: #Empty-list case
        return None
    return (min(lista),max(lista))
#7. Given a list of numbers, return a triple formed by the smallest and biggest elements and the list of the remaining elements.
def get_smallest_biggest_and_rest(lista):
    if len(lista) == 0: #Empty-list case
        return None
    smallest = min(lista)
    biggest = max(lista)
    return (smallest,biggest, [element for element in lista if element != smallest and element != biggest])
#8. Given a sorted list of numbers, compute, if possible, its mean and median, returning them in a tuple.
def get_average_and_median(lista):
    if len(lista) == 0: #Empty-list case
        return None
    if len([element for element in lista if isinstance(element, str)]) != 0: #Check that the list is numeric only
        return None
    if lista != sorted(lista): #Check that the list is sorted
        return None
    median = lista[len(lista)//2]
sum = 0
for i in lista:
sum += i
average = sum/len(lista)
return (average,median)
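
# Worked example (editor's sketch): for the sorted list [1, 2, 3, 4] the
# function returns (2.5, 3) -- the mean is 10/4 = 2.5 and the "median" is the
# upper-middle element lista[4//2] == lista[2] == 3; this simplified median
# never averages the two middle values of an even-length list.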
if __name__ == "__main__":
#1
print("1) " + str(get_head([1,2,3])))
print("1) " + str(get_head([])))
#2
print("2) " + str(get_tail([1,2,3])))
print("2) " + str(get_tail([])))
#3
print("3) " + str(join_homologos([1,2,3],[3,2,1])))
print("3) " + str(join_homologos([1,2,3],[3,1])))
#4
print("4) " + str(get_smallest([1,2,3])))
print("4) " + str(get_smallest([])))
#5
print("5) " + str(get_smallest_and_rest([1,2,3])))
print("5) " + str(get_smallest_and_rest([])))
#6
print("6) " + str(get_smallest_and_biggest([1,2,3,4])))
print("6) " + str(get_smallest_and_biggest([])))
#7
print("7) " + str(get_smallest_biggest_and_rest([1,2,3,4])))
print("7) " + str(get_smallest_biggest_and_rest([])))
#8
print("8) " + str(get_average_and_median([1,2,3,4,4,5,5])))
print("8) " + str(get_average_and_median([1,3,4,2])))
print("8) " + str(get_average_and_median([])))
print("8) " + str(get_average_and_median([1,3,'b'])))
| 34.697248
| 151
| 0.638287
|
794e90ebf4066373e9e80503d5223bdfcb0a3273
| 580
|
py
|
Python
|
lab8/point.py
|
kuzkov/computational-geometry
|
4411231a8097e618e03b3ef0ad5836e49e837216
|
[
"MIT"
] | 1
|
2021-04-04T07:34:14.000Z
|
2021-04-04T07:34:14.000Z
|
lab8/point.py
|
kuzkov/computational-geometry
|
4411231a8097e618e03b3ef0ad5836e49e837216
|
[
"MIT"
] | null | null | null |
lab8/point.py
|
kuzkov/computational-geometry
|
4411231a8097e618e03b3ef0ad5836e49e837216
|
[
"MIT"
] | 1
|
2021-02-18T09:50:10.000Z
|
2021-02-18T09:50:10.000Z
|
import math
import numpy as np
from vector import Vector
import segment as segment_lib
class Point(Vector):
def direction(self, segment):
det = np.linalg.det([
segment.as_vector().as_array(),
segment_lib.Segment(segment.p1, self).as_vector().as_array()
])
        # math.isclose(det, 0) with only the default rel_tol degenerates to an
        # exact comparison against zero, so an absolute tolerance is needed.
        return 1 if det > 0 else 0 if math.isclose(det, 0, abs_tol=1e-9) else -1  # 1 left, -1 right, 0 on
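
    # Numeric check (editor's sketch, independent of the project's Vector and
    # Segment classes): the sign of det([[x2 - x1, y2 - y1], [px - x1, py - y1]])
    # encodes the turn direction. For the segment (0,0)->(1,0) and the point
    # (0,1), det([[1, 0], [0, 1]]) == 1 > 0, so the point lies to the left.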
    def inside_segment(self, segment):
        # Stub filled in (editor's sketch, assuming segment exposes p1/p2 with
        # x/y attributes): a point lies on a segment when it is collinear with
        # it and falls inside the segment's axis-aligned bounding box.
        return (self.direction(segment) == 0
                and min(segment.p1.x, segment.p2.x) <= self.x <= max(segment.p1.x, segment.p2.x)
                and min(segment.p1.y, segment.p2.y) <= self.y <= max(segment.p1.y, segment.p2.y))
def tolist(self):
return (self.x, self.y)
def within_polygon(self, polygon):
return polygon.contains(self)
| 25.217391
| 91
| 0.631034
|
794e90fd526d75f68cf9df0b416a5f4fa1249391
| 9,900
|
py
|
Python
|
3D/models/main_model.py
|
zabaras/inn-surrogate
|
e04bbabb0c93ad9d8880193e3c1410ba5d9211c2
|
[
"MIT"
] | 12
|
2021-02-17T08:38:23.000Z
|
2021-12-14T20:34:31.000Z
|
3D/models/main_model.py
|
zabaras/inn-surrogate
|
e04bbabb0c93ad9d8880193e3c1410ba5d9211c2
|
[
"MIT"
] | 1
|
2021-11-18T13:25:18.000Z
|
2021-11-18T15:11:57.000Z
|
3D/models/main_model.py
|
zabaras/inn-surrogate
|
e04bbabb0c93ad9d8880193e3c1410ba5d9211c2
|
[
"MIT"
] | 5
|
2021-02-19T23:06:29.000Z
|
2021-09-20T17:11:00.000Z
|
import numpy as np
import torch
import sys
import torch.nn as nn
from models.flat_data_model import Flat_data
from models.Unflat_data_model import Unflat_data
from models.Divide_data_model import divide_data
from models.Permute_data_model import Permute_data
from models.Downsample_model import Downsample
from models.CouplingBlock_model import CouplingBlock
from models.CouplingOneSide_model import CouplingOneSide
class main_file(nn.Module):
'''
Args:
s_net_t_net: scale and shift network
        input_dimension*: input dimensions for the corresponding multiscale blocks
x: Input (BXCXDXHXW)
c: conditioning data
'''
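    # Editor's note, derived from the forward pass below: this is a
    # three-scale conditional INN. Scale 1 applies six one-sided couplings at
    # full resolution and downsamples; scale 2 applies five coupling/permute
    # pairs and splits off half the channels; scale 3 applies four more
    # coupling/permute pairs, splits again, and runs a final coupling on the
    # flattened remainder. The flattened split-off halves are concatenated
    # with that result to form the latent output.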
def __init__(self, cond_size, s_net_t_net,
input_dimension1,input_dimension12,cond_size1, permute_a1,value_dim,input_dimension1_r,
input_dimension2,input_dimension22,cond_size2,permute_a2,s_net_t_net2,input_dimension2_r,
input_dimension3,input_dimension32,cond_size3,s_net_t_net3,permute_a3):
super(main_file,self).__init__()
self.single_side1 = CouplingOneSide(s_net_t_net, cond_size)
self.single_side2 = CouplingOneSide(s_net_t_net, cond_size)
self.single_side3 = CouplingOneSide(s_net_t_net, cond_size)
self.single_side4 = CouplingOneSide(s_net_t_net, cond_size)
self.single_side5 = CouplingOneSide(s_net_t_net, cond_size)
self.single_side6 = CouplingOneSide(s_net_t_net, cond_size)
self.downsample = Downsample()
self.coupling1 = CouplingBlock(s_net_t_net, input_dimension1,input_dimension12,cond_size1)
self.coupling2 = CouplingBlock(s_net_t_net, input_dimension1,input_dimension12,cond_size1)
self.coupling3 = CouplingBlock(s_net_t_net, input_dimension1,input_dimension12,cond_size1)
self.coupling4 = CouplingBlock(s_net_t_net, input_dimension1,input_dimension12,cond_size1)
self.coupling5 = CouplingBlock(s_net_t_net, input_dimension1,input_dimension12,cond_size1)
self.permute = Permute_data(permute_a1,0)
self.permute_c1 = Permute_data(permute_a1,1)
self.permute_c2 = Permute_data(permute_a1,2)
self.permute_c3 = Permute_data(permute_a1,3)
self.permute_c4 = Permute_data(permute_a1,4)
self.unflat1 = Unflat_data(input_dimension1_r)
self.split = divide_data(input_dimension1,value_dim)
self.coupling21 = CouplingBlock(s_net_t_net2, input_dimension2,input_dimension22,cond_size2)
self.coupling22 = CouplingBlock(s_net_t_net2, input_dimension2,input_dimension22,cond_size2)
self.coupling23 = CouplingBlock(s_net_t_net2, input_dimension2,input_dimension22,cond_size2)
self.coupling24 = CouplingBlock(s_net_t_net2, input_dimension2,input_dimension22,cond_size2)
self.permute2 = Permute_data(permute_a2,0)
self.permute2_c1 = Permute_data(permute_a2,1)
self.permute2_c2 = Permute_data(permute_a2,2)
self.permute2_c3 = Permute_data(permute_a2,3)
self.split2 = divide_data(input_dimension2,[4,4])
self.flat2 = Flat_data()
self.unflat2 = Unflat_data(input_dimension2_r)
self.coupling31 = CouplingBlock(s_net_t_net3, input_dimension3,input_dimension32,cond_size3)
self.permute3 = Permute_data(permute_a3,0)
def forward(self, x, c1,c2,c3,c4,sample_the_data=False,forward=False,jac=False):
if forward==True:
#1-1
out1= self.single_side1(x,c1)
jac0 = self.single_side1.jacobian()
#1-2
out2 = self.single_side2(out1,c1)
jac0_1 = self.single_side2.jacobian()
#1-3
out3= self.single_side3(out2,c1)
jac0_2 = self.single_side3.jacobian()
#1-4
out4 = self.single_side4(out3,c1)
jac0_3 = self.single_side4.jacobian()
#1-5
out5 = self.single_side5(out4,c1)
jac0_4 = self.single_side5.jacobian()
#1-6
out6 = self.single_side6(out5,c1)
jac0_5 = self.single_side6.jacobian()
out7 = self.downsample(out6)
jac_glow1 =out7
#2
out12 = self.coupling1(out7,c2)
jac1 = self.coupling1.jacobian()
out13 = self.permute(out12)
out14 = self.coupling2(out13,c2)
jac1_c1 = self.coupling2.jacobian()
out15 = self.permute_c1(out14)
out16 = self.coupling3(out15,c2)
jac1_c2 = self.coupling3.jacobian()
out17 = self.permute_c2(out16)
out18 = self.coupling4(out17,c2)
jac1_c3 = self.coupling4.jacobian()
out19 = self.permute_c3(out18)
out20 = self.coupling5(out19,c2)
jac1_c4 = self.coupling5.jacobian()
out21 = self.permute_c4(out20)
out22 = self.split(out21)
out1s = out22[0]
out2s = out22[1]
flat_output1 = self.flat2(out2s)
out31 = self.downsample(out1s)
jac_glow2 = out31
#3
out32 = self.coupling21(out31,c3)
jac2 = self.coupling21.jacobian()
out33 = self.permute2(out32)
out34 = self.coupling22(out33,c3)
jac2_c1 = self.coupling22.jacobian()
out35 = self.permute2_c1(out34)
out36 = self.coupling23(out35,c3)
jac2_c2 = self.coupling23.jacobian()
out37= self.permute2_c2(out36)
out38 = self.coupling24(out37,c3)
jac2_c3 = self.coupling24.jacobian()
out39 = self.permute2_c3(out38)
out40 = self.split2(out39)
out1s4 = out40[0]
out2s4 = out40[1]
flat_output2 = self.flat2(out2s4)
flat_ds2 = self.flat2(out1s4)
jac_glow3 = flat_ds2
#4
out1f = self.coupling31(flat_ds2,c4)
jac3 = self.coupling31.jacobian()
out_all = self.permute3(out1f)
final_out = torch.cat((flat_output1,flat_output2,out_all),dim=1)
#jacobian
jac = jac0+jac1+jac2+jac3+jac0_1+jac0_2+jac0_3+jac0_4+jac0_5+jac1_c1+jac1_c2+jac1_c3+jac1_c4+jac2_c1+jac2_c2+jac2_c3
return final_out, jac
else:
            #inverse path: split the flat latent into the three multiscale chunks (8192, 4096, and the remainder)
out1 = x[:,:8192]
out1_unflat = self.unflat1(out1)
out2 = x[:,8192:12288]
out2_unflat = self.unflat2(out2)
out3 = x[:,12288:]
out3p = self.permute3(out3,sample_the_data=True)
out = self.coupling31(out3p,c4,sample_the_data=True)
out3_unflat = self.unflat2(out)
#combine the data
combine_out2_out3 = torch.cat((out3_unflat,out2_unflat), dim=1)
#=========================================
#permute the data
out_4 = self.permute2_c3(combine_out2_out3,sample_the_data=True)
out_5 = self.coupling24(out_4,c3,sample_the_data=True)
#==============================================
#=========================================
#permute the data
out_4 = self.permute2_c2(out_5,sample_the_data=True)
out_5 = self.coupling23(out_4,c3,sample_the_data=True)
#==============================================
#=========================================
#permute the data
out_4 = self.permute2_c1(out_5,sample_the_data=True)
out_5 = self.coupling22(out_4,c3,sample_the_data=True)
            #==============================================
#=========================================
#permute the data
out_4 = self.permute2(out_5,sample_the_data=True)
out_5 = self.coupling21(out_4,c3,sample_the_data=True)
            #==============================================
out_6 = self.downsample(out_5,sample_the_data=True)
combine_out6_out1 = torch.cat((out_6,out1_unflat), dim=1)
#=============================
#permute
out_7 = self.permute_c4(combine_out6_out1,sample_the_data=True)
out_8 = self.coupling5(out_7,c2,sample_the_data=True)
#==================================
#=============================
#permute
out_7 = self.permute_c3(out_8,sample_the_data=True)
out_8 = self.coupling4(out_7,c2,sample_the_data=True)
#==================================
#=============================
#permute
out_7 = self.permute_c2(out_8,sample_the_data=True)
out_8 = self.coupling3(out_7,c2,sample_the_data=True)
#==================================
#=============================
#permute
out_7 = self.permute_c1(out_8,sample_the_data=True)
out_8 = self.coupling2(out_7,c2,sample_the_data=True)
#==================================
#=============================
#permute
out_7 = self.permute(out_8,sample_the_data=True)
out_8 = self.coupling1(out_7,c2,sample_the_data=True)
#==================================
            #upsample to 1X64X64
out_9 = self.downsample(out_8,sample_the_data=True)
out_10 = self.single_side6(out_9,c1,sample_the_data=True)
out_10 = self.single_side5(out_10,c1,sample_the_data=True)
out_10 = self.single_side4(out_10,c1,sample_the_data=True)
out_10 = self.single_side3(out_10,c1,sample_the_data=True)
out_10 = self.single_side2(out_10,c1,sample_the_data=True)
out_10 = self.single_side1(out_10,c1,sample_the_data=True)
return out_10
| 39.285714
| 128
| 0.580707
|
794e91238f5c819eaef9c850c84c8da86afe80ce
| 1,810
|
py
|
Python
|
docs/sources/conf.py
|
imbi7py/filemanager
|
1f0cf3ceb826528fe6be8fb38cfa1d252afbce22
|
[
"MIT"
] | 24
|
2020-10-31T11:59:30.000Z
|
2022-03-23T10:37:05.000Z
|
docs/sources/conf.py
|
imbi7py/filemanager
|
1f0cf3ceb826528fe6be8fb38cfa1d252afbce22
|
[
"MIT"
] | 2
|
2020-11-19T20:26:21.000Z
|
2021-05-04T17:06:28.000Z
|
docs/sources/conf.py
|
kivymd-extensions/filemanager
|
1f0cf3ceb826528fe6be8fb38cfa1d252afbce22
|
[
"MIT"
] | 2
|
2020-11-16T13:37:04.000Z
|
2021-04-15T09:57:07.000Z
|
# Configuration file for the Sphinx documentation builder.
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# Path setup
import os
import sys
sys.path.insert(0, os.path.abspath("_extensions"))
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath("."))))
import autoapi_filemanager # NOQA. from _extensions
# Project information
project = "File Manager"
copyright = "2020 KivyMD Team"
author = "KivyMD Team"
version = "0.1.0"
release = "0.1.0"
# General configuration
master_doc = "index"
exclude_patterns = []
templates_path = ["_templates"]
locale_dirs = ["_locales"]
language = "Python"
# HTML Theme
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
html_favicon = "_static/logo-kivymd.png"
html_logo = "_static/logo-kivymd.png"
html_theme_options = {
"canonical_url": "https://kivymd.readthedocs.io/en/latest/",
"navigation_depth": 2,
"collapse_navigation": False,
"titles_only": True,
}
# Extensions
extensions = [
"sphinx.ext.autodoc",
"autoapi_filemanager",
"sphinx.ext.intersphinx",
"kivy_lexer",
"toctree_with_sort",
]
# AutoAPI configuration
autoapi_dirs = ["../../kivymd_extensions/filemanager"]
autoapi_template_dir = os.path.abspath("_templates")
autoapi_type = "python"
autoapi_file_patterns = ["*.py"]
autoapi_generate_api_docs = True
autoapi_options = ["members", "undoc-members"]
autoapi_root = "api"
autoapi_add_toctree_entry = False
autoapi_include_inheritance_graphs = False
autoapi_include_summaries = True
autoapi_python_class_content = "class"
autoapi_python_use_implicit_namespaces = False
autoapi_keep_files = False # True for debugging
# InterSphinx configuration
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"kivy": ("https://kivy.org/doc/stable/", None),
}
| 25.492958
| 74
| 0.739779
|
794e92f4d7f7435d4f4c9fdaf31ca68fce3ca0ec
| 26,336
|
py
|
Python
|
Exercises/pacman-search/pacman.py
|
yajuanw/artificial-intelligence
|
b10f7651fd4516a9ccf8b90e05a12809a3ba7b33
|
[
"MIT"
] | 2
|
2021-01-03T11:04:03.000Z
|
2021-01-03T11:04:03.000Z
|
Exercises/pacman-search/pacman.py
|
yajuanw/artificial-intelligence
|
b10f7651fd4516a9ccf8b90e05a12809a3ba7b33
|
[
"MIT"
] | null | null | null |
Exercises/pacman-search/pacman.py
|
yajuanw/artificial-intelligence
|
b10f7651fd4516a9ccf8b90e05a12809a3ba7b33
|
[
"MIT"
] | null | null | null |
# pacman.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
Pacman.py holds the logic for the classic pacman game along with the main
code to run a game. This file is divided into three sections:
(i) Your interface to the pacman world:
Pacman is a complex environment. You probably don't want to
read through all of the code we wrote to make the game runs
correctly. This section contains the parts of the code
that you will need to understand in order to complete the
project. There is also some code in game.py that you should
understand.
(ii) The hidden secrets of pacman:
This section contains all of the logic code that the pacman
environment uses to decide who can move where, who dies when
things collide, etc. You shouldn't need to read this section
of code, but you can if you want.
(iii) Framework to start a game:
The final section contains the code for reading the command
you use to set up the game, then starting up a new game, along with
linking in all the external parts (agent functions, graphics).
Check this section out to see all the options available to you.
To play your first game, type 'python pacman.py' from the command line.
The keys are 'a', 's', 'd', and 'w' to move (or arrow keys). Have fun!
"""
from game import GameStateData
from game import Game
from game import Directions
from game import Actions
from util import nearestPoint
from util import manhattanDistance
import util, layout
import sys, types, time, random, os
###################################################
# YOUR INTERFACE TO THE PACMAN WORLD: A GameState #
###################################################
class GameState:
"""
A GameState specifies the full game state, including the food, capsules,
agent configurations and score changes.
GameStates are used by the Game object to capture the actual state of the game and
can be used by agents to reason about the game.
Much of the information in a GameState is stored in a GameStateData object. We
strongly suggest that you access that data via the accessor methods below rather
than referring to the GameStateData object directly.
Note that in classic Pacman, Pacman is always agent 0.
"""
####################################################
# Accessor methods: use these to access state data #
####################################################
# static variable keeps track of which states have had getLegalActions called
explored = set()
def getAndResetExplored():
tmp = GameState.explored.copy()
GameState.explored = set()
return tmp
getAndResetExplored = staticmethod(getAndResetExplored)
def getLegalActions( self, agentIndex=0 ):
"""
Returns the legal actions for the agent specified.
"""
# GameState.explored.add(self)
if self.isWin() or self.isLose(): return []
if agentIndex == 0: # Pacman is moving
return PacmanRules.getLegalActions( self )
else:
return GhostRules.getLegalActions( self, agentIndex )
def generateSuccessor( self, agentIndex, action):
"""
Returns the successor state after the specified agent takes the action.
"""
# Check that successors exist
if self.isWin() or self.isLose(): raise Exception('Can\'t generate a successor of a terminal state.')
# Copy current state
state = GameState(self)
# Let agent's logic deal with its action's effects on the board
if agentIndex == 0: # Pacman is moving
state.data._eaten = [False for i in range(state.getNumAgents())]
PacmanRules.applyAction( state, action )
else: # A ghost is moving
GhostRules.applyAction( state, action, agentIndex )
# Time passes
if agentIndex == 0:
state.data.scoreChange += -TIME_PENALTY # Penalty for waiting around
else:
GhostRules.decrementTimer( state.data.agentStates[agentIndex] )
# Resolve multi-agent effects
GhostRules.checkDeath( state, agentIndex )
# Book keeping
state.data._agentMoved = agentIndex
state.data.score += state.data.scoreChange
GameState.explored.add(self)
GameState.explored.add(state)
return state
def getLegalPacmanActions( self ):
return self.getLegalActions( 0 )
def generatePacmanSuccessor( self, action ):
"""
Generates the successor state after the specified pacman move
"""
return self.generateSuccessor( 0, action )
def getPacmanState( self ):
"""
Returns an AgentState object for pacman (in game.py)
state.pos gives the current position
state.direction gives the travel vector
"""
return self.data.agentStates[0].copy()
def getPacmanPosition( self ):
return self.data.agentStates[0].getPosition()
def getGhostStates( self ):
return self.data.agentStates[1:]
def getGhostState( self, agentIndex ):
if agentIndex == 0 or agentIndex >= self.getNumAgents():
raise Exception("Invalid index passed to getGhostState")
return self.data.agentStates[agentIndex]
def getGhostPosition( self, agentIndex ):
if agentIndex == 0:
raise Exception("Pacman's index passed to getGhostPosition")
return self.data.agentStates[agentIndex].getPosition()
def getGhostPositions(self):
return [s.getPosition() for s in self.getGhostStates()]
def getNumAgents( self ):
return len( self.data.agentStates )
def getScore( self ):
return float(self.data.score)
def getCapsules(self):
"""
Returns a list of positions (x,y) of the remaining capsules.
"""
return self.data.capsules
def getNumFood( self ):
return self.data.food.count()
def getFood(self):
"""
Returns a Grid of boolean food indicator variables.
Grids can be accessed via list notation, so to check
if there is food at (x,y), just call
currentFood = state.getFood()
if currentFood[x][y] == True: ...
"""
return self.data.food
def getWalls(self):
"""
Returns a Grid of boolean wall indicator variables.
Grids can be accessed via list notation, so to check
if there is a wall at (x,y), just call
walls = state.getWalls()
if walls[x][y] == True: ...
"""
return self.data.layout.walls
def hasFood(self, x, y):
return self.data.food[x][y]
def hasWall(self, x, y):
return self.data.layout.walls[x][y]
def isLose( self ):
return self.data._lose
def isWin( self ):
return self.data._win
#############################################
# Helper methods: #
# You shouldn't need to call these directly #
#############################################
def __init__( self, prevState = None ):
"""
Generates a new state by copying information from its predecessor.
"""
        if prevState != None: # Copy information from the predecessor state
self.data = GameStateData(prevState.data)
else:
self.data = GameStateData()
def deepCopy( self ):
state = GameState( self )
state.data = self.data.deepCopy()
return state
def __eq__( self, other ):
"""
Allows two states to be compared.
"""
return hasattr(other, 'data') and self.data == other.data
def __hash__( self ):
"""
Allows states to be keys of dictionaries.
"""
return hash( self.data )
def __str__( self ):
return str(self.data)
def initialize( self, layout, numGhostAgents=1000 ):
"""
Creates an initial game state from a layout array (see layout.py).
"""
self.data.initialize(layout, numGhostAgents)
############################################################################
# THE HIDDEN SECRETS OF PACMAN #
# #
# You shouldn't need to look through the code in this section of the file. #
############################################################################
SCARED_TIME = 40    # Number of moves that ghosts stay scared
COLLISION_TOLERANCE = 0.7 # How close ghosts must be to Pacman to kill
TIME_PENALTY = 1 # Number of points lost each round
class ClassicGameRules:
"""
These game rules manage the control flow of a game, deciding when
and how the game starts and ends.
"""
def __init__(self, timeout=30):
self.timeout = timeout
def newGame( self, layout, pacmanAgent, ghostAgents, display, quiet = False, catchExceptions=False):
agents = [pacmanAgent] + ghostAgents[:layout.getNumGhosts()]
initState = GameState()
initState.initialize( layout, len(ghostAgents) )
game = Game(agents, display, self, catchExceptions=catchExceptions)
game.state = initState
self.initialState = initState.deepCopy()
self.quiet = quiet
return game
def process(self, state, game):
"""
Checks to see whether it is time to end the game.
"""
if state.isWin(): self.win(state, game)
if state.isLose(): self.lose(state, game)
def win( self, state, game ):
if not self.quiet: print("Pacman emerges victorious! Score: %d" % state.data.score)
game.gameOver = True
def lose( self, state, game ):
if not self.quiet: print("Pacman died! Score: %d" % state.data.score)
game.gameOver = True
def getProgress(self, game):
return float(game.state.getNumFood()) / self.initialState.getNumFood()
def agentCrash(self, game, agentIndex):
if agentIndex == 0:
print("Pacman crashed")
else:
print("A ghost crashed")
def getMaxTotalTime(self, agentIndex):
return self.timeout
def getMaxStartupTime(self, agentIndex):
return self.timeout
def getMoveWarningTime(self, agentIndex):
return self.timeout
def getMoveTimeout(self, agentIndex):
return self.timeout
def getMaxTimeWarnings(self, agentIndex):
return 0
class PacmanRules:
"""
These functions govern how pacman interacts with his environment under
the classic game rules.
"""
PACMAN_SPEED=1
def getLegalActions( state ):
"""
Returns a list of possible actions.
"""
return Actions.getPossibleActions( state.getPacmanState().configuration, state.data.layout.walls )
getLegalActions = staticmethod( getLegalActions )
def applyAction( state, action ):
"""
Edits the state to reflect the results of the action.
"""
legal = PacmanRules.getLegalActions( state )
if action not in legal:
raise Exception("Illegal action " + str(action))
pacmanState = state.data.agentStates[0]
# Update Configuration
vector = Actions.directionToVector( action, PacmanRules.PACMAN_SPEED )
pacmanState.configuration = pacmanState.configuration.generateSuccessor( vector )
# Eat
next = pacmanState.configuration.getPosition()
nearest = nearestPoint( next )
if manhattanDistance( nearest, next ) <= 0.5 :
# Remove food
PacmanRules.consume( nearest, state )
applyAction = staticmethod( applyAction )
def consume( position, state ):
x,y = position
# Eat food
if state.data.food[x][y]:
state.data.scoreChange += 10
state.data.food = state.data.food.copy()
state.data.food[x][y] = False
state.data._foodEaten = position
# TODO: cache numFood?
numFood = state.getNumFood()
if numFood == 0 and not state.data._lose:
state.data.scoreChange += 500
state.data._win = True
# Eat capsule
if( position in state.getCapsules() ):
state.data.capsules.remove( position )
state.data._capsuleEaten = position
# Reset all ghosts' scared timers
for index in range( 1, len( state.data.agentStates ) ):
state.data.agentStates[index].scaredTimer = SCARED_TIME
consume = staticmethod( consume )
class GhostRules:
"""
These functions dictate how ghosts interact with their environment.
"""
GHOST_SPEED=1.0
def getLegalActions( state, ghostIndex ):
"""
Ghosts cannot stop, and cannot turn around unless they
reach a dead end, but can turn 90 degrees at intersections.
"""
conf = state.getGhostState( ghostIndex ).configuration
possibleActions = Actions.getPossibleActions( conf, state.data.layout.walls )
reverse = Actions.reverseDirection( conf.direction )
if Directions.STOP in possibleActions:
possibleActions.remove( Directions.STOP )
if reverse in possibleActions and len( possibleActions ) > 1:
possibleActions.remove( reverse )
return possibleActions
getLegalActions = staticmethod( getLegalActions )
def applyAction( state, action, ghostIndex):
legal = GhostRules.getLegalActions( state, ghostIndex )
if action not in legal:
raise Exception("Illegal ghost action " + str(action))
ghostState = state.data.agentStates[ghostIndex]
speed = GhostRules.GHOST_SPEED
if ghostState.scaredTimer > 0: speed /= 2.0
vector = Actions.directionToVector( action, speed )
ghostState.configuration = ghostState.configuration.generateSuccessor( vector )
applyAction = staticmethod( applyAction )
def decrementTimer( ghostState):
timer = ghostState.scaredTimer
if timer == 1:
ghostState.configuration.pos = nearestPoint( ghostState.configuration.pos )
ghostState.scaredTimer = max( 0, timer - 1 )
decrementTimer = staticmethod( decrementTimer )
def checkDeath( state, agentIndex):
pacmanPosition = state.getPacmanPosition()
if agentIndex == 0: # Pacman just moved; Anyone can kill him
for index in range( 1, len( state.data.agentStates ) ):
ghostState = state.data.agentStates[index]
ghostPosition = ghostState.configuration.getPosition()
if GhostRules.canKill( pacmanPosition, ghostPosition ):
GhostRules.collide( state, ghostState, index )
else:
ghostState = state.data.agentStates[agentIndex]
ghostPosition = ghostState.configuration.getPosition()
if GhostRules.canKill( pacmanPosition, ghostPosition ):
GhostRules.collide( state, ghostState, agentIndex )
checkDeath = staticmethod( checkDeath )
def collide( state, ghostState, agentIndex):
if ghostState.scaredTimer > 0:
state.data.scoreChange += 200
GhostRules.placeGhost(state, ghostState)
ghostState.scaredTimer = 0
# Added for first-person
state.data._eaten[agentIndex] = True
else:
if not state.data._win:
state.data.scoreChange -= 500
state.data._lose = True
collide = staticmethod( collide )
def canKill( pacmanPosition, ghostPosition ):
return manhattanDistance( ghostPosition, pacmanPosition ) <= COLLISION_TOLERANCE
canKill = staticmethod( canKill )
def placeGhost(state, ghostState):
ghostState.configuration = ghostState.start
placeGhost = staticmethod( placeGhost )
#############################
# FRAMEWORK TO START A GAME #
#############################
def default(str):
return str + ' [Default: %default]'
def parseAgentArgs(str):
if str == None: return {}
pieces = str.split(',')
opts = {}
for p in pieces:
if '=' in p:
key, val = p.split('=')
else:
key,val = p, 1
opts[key] = val
return opts
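
# Illustration (editor's note): parseAgentArgs('depth=3,evalFn') returns
# {'depth': '3', 'evalFn': 1} -- values stay strings, and a bare key gets
# the default value 1.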
def readCommand( argv ):
"""
Processes the command used to run pacman from the command line.
"""
from optparse import OptionParser
usageStr = """
USAGE: python pacman.py <options>
EXAMPLES: (1) python pacman.py
- starts an interactive game
(2) python pacman.py --layout smallClassic --zoom 2
OR python pacman.py -l smallClassic -z 2
- starts an interactive game on a smaller board, zoomed in
"""
parser = OptionParser(usageStr)
parser.add_option('-n', '--numGames', dest='numGames', type='int',
help=default('the number of GAMES to play'), metavar='GAMES', default=1)
parser.add_option('-l', '--layout', dest='layout',
help=default('the LAYOUT_FILE from which to load the map layout'),
metavar='LAYOUT_FILE', default='mediumClassic')
parser.add_option('-p', '--pacman', dest='pacman',
help=default('the agent TYPE in the pacmanAgents module to use'),
metavar='TYPE', default='KeyboardAgent')
parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics',
help='Display output as text only', default=False)
parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',
help='Generate minimal output and no graphics', default=False)
parser.add_option('-g', '--ghosts', dest='ghost',
help=default('the ghost agent TYPE in the ghostAgents module to use'),
metavar = 'TYPE', default='RandomGhost')
parser.add_option('-k', '--numghosts', type='int', dest='numGhosts',
help=default('The maximum number of ghosts to use'), default=4)
parser.add_option('-z', '--zoom', type='float', dest='zoom',
help=default('Zoom the size of the graphics window'), default=1.0)
parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed',
help='Fixes the random seed to always play the same game', default=False)
parser.add_option('-r', '--recordActions', action='store_true', dest='record',
help='Writes game histories to a file (named by the time they were played)', default=False)
parser.add_option('--replay', dest='gameToReplay',
help='A recorded game file (pickle) to replay', default=None)
parser.add_option('-a','--agentArgs',dest='agentArgs',
help='Comma separated values sent to agent. e.g. "opt1=val1,opt2,opt3=val3"')
parser.add_option('-x', '--numTraining', dest='numTraining', type='int',
help=default('How many episodes are training (suppresses output)'), default=0)
parser.add_option('--frameTime', dest='frameTime', type='float',
help=default('Time to delay between frames; <0 means keyboard'), default=0.1)
parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions',
help='Turns on exception handling and timeouts during games', default=False)
parser.add_option('--timeout', dest='timeout', type='int',
help=default('Maximum length of time an agent can spend computing in a single game'), default=30)
options, otherjunk = parser.parse_args(argv)
if len(otherjunk) != 0:
raise Exception('Command line input not understood: ' + str(otherjunk))
args = dict()
# Fix the random seed
if options.fixRandomSeed: random.seed('cs188')
# Choose a layout
args['layout'] = layout.getLayout( options.layout )
if args['layout'] == None: raise Exception("The layout " + options.layout + " cannot be found")
# Choose a Pacman agent
noKeyboard = options.gameToReplay == None and (options.textGraphics or options.quietGraphics)
pacmanType = loadAgent(options.pacman, noKeyboard)
agentOpts = parseAgentArgs(options.agentArgs)
if options.numTraining > 0:
args['numTraining'] = options.numTraining
if 'numTraining' not in agentOpts: agentOpts['numTraining'] = options.numTraining
pacman = pacmanType(**agentOpts) # Instantiate Pacman with agentArgs
args['pacman'] = pacman
# Don't display training games
if 'numTrain' in agentOpts:
options.numQuiet = int(agentOpts['numTrain'])
options.numIgnore = int(agentOpts['numTrain'])
# Choose a ghost agent
ghostType = loadAgent(options.ghost, noKeyboard)
args['ghosts'] = [ghostType( i+1 ) for i in range( options.numGhosts )]
# Choose a display format
if options.quietGraphics:
import textDisplay
args['display'] = textDisplay.NullGraphics()
elif options.textGraphics:
import textDisplay
textDisplay.SLEEP_TIME = options.frameTime
args['display'] = textDisplay.PacmanGraphics()
else:
import graphicsDisplay
args['display'] = graphicsDisplay.PacmanGraphics(options.zoom, frameTime = options.frameTime)
args['numGames'] = options.numGames
args['record'] = options.record
args['catchExceptions'] = options.catchExceptions
args['timeout'] = options.timeout
# Special case: recorded games don't use the runGames method or args structure
    if options.gameToReplay is not None:
        print('Replaying recorded game %s.' % options.gameToReplay)
        import pickle
        f = open(options.gameToReplay, 'rb')  # pickle files must be read in binary mode
        try: recorded = pickle.load(f)
        finally: f.close()
recorded['display'] = args['display']
replayGame(**recorded)
sys.exit(0)
return args
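# Example (hypothetical) programmatic use, mirroring the usage string above:
#   args = readCommand(['-l', 'smallClassic', '-p', 'GreedyAgent', '-n', '3'])
#   runGames(**args)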
def loadAgent(pacman, nographics):
    # Look through every PYTHONPATH directory for a module that defines the agent.
pythonPathStr = os.path.expandvars("$PYTHONPATH")
if pythonPathStr.find(';') == -1:
pythonPathDirs = pythonPathStr.split(':')
else:
pythonPathDirs = pythonPathStr.split(';')
pythonPathDirs.append('.')
for moduleDir in pythonPathDirs:
if not os.path.isdir(moduleDir): continue
moduleNames = [f for f in os.listdir(moduleDir) if f.endswith('gents.py')]
for modulename in moduleNames:
try:
module = __import__(modulename[:-3])
except ImportError:
continue
if pacman in dir(module):
if nographics and modulename == 'keyboardAgents.py':
raise Exception('Using the keyboard requires graphics (not text display)')
return getattr(module, pacman)
raise Exception('The agent ' + pacman + ' is not specified in any *Agents.py.')
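# loadAgent resolves an agent class by name from any module whose filename ends
# in 'gents.py' (pacmanAgents.py, ghostAgents.py, ...) found on PYTHONPATH or
# in the current directory, e.g. (hypothetical):
#   RandomGhost = loadAgent('RandomGhost', nographics=True)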
def replayGame( layout, actions, display ):
import pacmanAgents, ghostAgents
rules = ClassicGameRules()
agents = [pacmanAgents.GreedyAgent()] + [ghostAgents.RandomGhost(i+1) for i in range(layout.getNumGhosts())]
game = rules.newGame( layout, agents[0], agents[1:], display )
state = game.state
display.initialize(state.data)
for action in actions:
# Execute the action
state = state.generateSuccessor( *action )
# Change the display
display.update( state.data )
# Allow for game specific conditions (winning, losing, etc.)
rules.process(state, game)
display.finish()
def runGames( layout, pacman, ghosts, display, numGames, record, numTraining = 0, catchExceptions=False, timeout=30 ):
import __main__
__main__.__dict__['_display'] = display
rules = ClassicGameRules(timeout)
games = []
for i in range( numGames ):
beQuiet = i < numTraining
if beQuiet:
# Suppress output and graphics
import textDisplay
gameDisplay = textDisplay.NullGraphics()
rules.quiet = True
else:
gameDisplay = display
rules.quiet = False
game = rules.newGame( layout, pacman, ghosts, gameDisplay, beQuiet, catchExceptions)
game.run()
if not beQuiet: games.append(game)
if record:
import time, pickle
fname = ('recorded-game-%d' % (i + 1)) + '-'.join([str(t) for t in time.localtime()[1:6]])
            f = open(fname, 'wb')  # binary mode for pickle; file() no longer exists in Python 3
components = {'layout': layout, 'actions': game.moveHistory}
pickle.dump(components, f)
f.close()
    if (numGames - numTraining) > 0:
        scores = [game.state.getScore() for game in games]
        wins = [game.state.isWin() for game in games]
        winRate = wins.count(True) / float(len(wins))
print('Average Score:', sum(scores) / float(len(scores)))
print('Scores: ', ', '.join([str(score) for score in scores]))
print('Win Rate: %d/%d (%.2f)' % (wins.count(True), len(wins), winRate))
print('Record: ', ', '.join([ ['Loss', 'Win'][int(w)] for w in wins]))
return games
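# Note: the first numTraining episodes run quietly on a NullGraphics display and
# are excluded from the games list, so the score/win-rate summary covers only
# the remaining (numGames - numTraining) displayed games.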
if __name__ == '__main__':
"""
The main function called when pacman.py is run
from the command line:
> python pacman.py
See the usage string for more details.
> python pacman.py --help
"""
args = readCommand( sys.argv[1:] ) # Get game components based on input
runGames( **args )
# import cProfile
# cProfile.run("runGames( **args )")
pass
| 38.446715
| 119
| 0.617216
|
794e937aba1acbaa3e66d8ba295a9545b3716a49
| 2,453
|
py
|
Python
|
data/p4VQE/R4/benchmark/startQiskit_noisy666.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_noisy666.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_noisy666.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=12
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n: int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=8
prog.h(input_qubit[2]) # number=3
prog.x(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.x(input_qubit[2]) # number=10
prog.x(input_qubit[2]) # number=11
# circuit end
return prog
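# Note: make_circuit reads the module-level globals E, V, gamma and beta, which
# are only assigned in the __main__ block below; calling it from another module
# without defining them first would raise a NameError.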
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
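    # F1 is a closed-form grid of the depth-1 QAOA objective over the sampled
    # (gamma, beta) pairs; the argmax below fixes the angles used by make_circuit.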
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
    sample_shot = 5600
writefile = open("../data/startQiskit_noisy666.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
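    # FakeYorktown mocks the 5-qubit ibmq_5_yorktown device from a snapshot of
    # its configuration and calibration data, so transpiling and running against
    # it approximates a noisy backend without network access.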
| 26.956044
| 118
| 0.630656
|
794e9389dcf556f5cc3b7cf14ccb205f42426610
| 2,865
|
py
|
Python
|
utest/running/test_imports.py
|
zahed3795/robotframework
|
9fb227f9116332bb4361271b41165acd94fc5956
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
utest/running/test_imports.py
|
zahed3795/robotframework
|
9fb227f9116332bb4361271b41165acd94fc5956
|
[
"ECL-2.0",
"Apache-2.0"
] | 29
|
2021-01-26T07:09:54.000Z
|
2022-03-28T10:38:54.000Z
|
utest/running/test_imports.py
|
zahed3795/robotframework
|
9fb227f9116332bb4361271b41165acd94fc5956
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-04-23T10:03:05.000Z
|
2018-04-23T10:03:05.000Z
|
import unittest
from robot.running import TestSuite
from robot.utils import StringIO
from robot.utils.asserts import assert_equal, assert_raises_with_msg
def run(suite, **config):
result = suite.run(output=None, log=None, report=None,
stdout=StringIO(), stderr=StringIO(), **config)
return result.suite
def assert_suite(suite, name, status, message='', tests=1):
assert_equal(suite.name, name)
assert_equal(suite.status, status)
assert_equal(suite.message, message)
assert_equal(len(suite.tests), tests)
def assert_test(test, name, status, tags=(), msg=''):
assert_equal(test.name, name)
assert_equal(test.status, status)
assert_equal(test.message, msg)
assert_equal(tuple(test.tags), tags)
class TestImports(unittest.TestCase):
def test_imports(self):
suite = TestSuite(name='Suite')
suite.resource.imports.create('Library', 'OperatingSystem')
suite.tests.create(name='Test').body.create('Directory Should Exist',
args=['.'])
result = run(suite)
assert_suite(result, 'Suite', 'PASS')
assert_test(result.tests[0], 'Test', 'PASS')
def test_library_imports(self):
suite = TestSuite(name='Suite')
suite.resource.imports.library('OperatingSystem')
suite.tests.create(name='Test').body.create('Directory Should Exist',
args=['.'])
result = run(suite)
assert_suite(result, 'Suite', 'PASS')
assert_test(result.tests[0], 'Test', 'PASS')
def test_resource_imports(self):
suite = TestSuite(name='Suite')
suite.resource.imports.resource('test_resource.txt')
suite.tests.create(name='Test').body.create('My Test Keyword')
assert_equal(suite.tests[0].body[0].name, 'My Test Keyword')
result = run(suite)
assert_suite(result, 'Suite', 'PASS')
assert_test(result.tests[0], 'Test', 'PASS')
def test_variable_imports(self):
suite = TestSuite(name='Suite')
suite.resource.imports.variables('variables_file.py')
suite.tests.create(name='Test').body.create(
'Should Be Equal As Strings',
args=['${MY_VARIABLE}', 'An example string']
)
result = run(suite)
assert_suite(result, 'Suite', 'PASS')
assert_test(result.tests[0], 'Test', 'PASS')
def test_invalid_import_type(self):
assert_raises_with_msg(ValueError,
"Invalid import type 'InvalidType'. Should be "
"one of 'Library', 'Resource' or 'Variables'.",
TestSuite().resource.imports.create,
'InvalidType', 'Name')
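# A minimal sketch of the pattern these tests drive (hypothetical names):
#   suite = TestSuite(name='Demo')
#   suite.resource.imports.library('Collections')
#   suite.tests.create(name='T').body.create('Log', args=['hello'])
#   run(suite)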
if __name__ == '__main__':
unittest.main()
| 37.697368
| 78
| 0.605585
|
794e94ca6b575f6b93b63a0df021a3d83ea9ce5b
| 1,962
|
py
|
Python
|
restfulpy/tests/test_smtp_provider.py
|
maryayi/restfulpy
|
df4a88a7cc2740c37fd8d80c310e0372b83cd8e0
|
[
"MIT"
] | 1
|
2021-06-11T21:39:44.000Z
|
2021-06-11T21:39:44.000Z
|
restfulpy/tests/test_smtp_provider.py
|
maryayi/restfulpy
|
df4a88a7cc2740c37fd8d80c310e0372b83cd8e0
|
[
"MIT"
] | null | null | null |
restfulpy/tests/test_smtp_provider.py
|
maryayi/restfulpy
|
df4a88a7cc2740c37fd8d80c310e0372b83cd8e0
|
[
"MIT"
] | null | null | null |
import io
import unittest
from os.path import dirname, abspath, join
from nanohttp import settings, configure
from restfulpy.messaging.providers import SmtpProvider
from restfulpy.testing.mockup import smtp_server
HERE = abspath(dirname(__file__))
class SmtpProviderTestCase(unittest.TestCase):
__configuration__ = '''
smtp:
host: smtp.example.com
port: 587
username: user@example.com
password: password
local_hostname: localhost
tls: false
auth: false
ssl: false
messaging:
mako_modules_directory: %s
template_dirs:
- %s
''' % (
join(HERE, '../../data', 'mako_modules'),
join(HERE, 'templates'),
)
@classmethod
def setUpClass(cls):
configure(init_value=cls.__configuration__, force=True)
def test_smtp_provider(self):
with smtp_server() as (server, bind):
settings.smtp.host = bind[0]
settings.smtp.port = bind[1]
# Without templates
SmtpProvider().send(
'test@example.com',
'test@example.com',
'Simple test body',
cc='test@example.com',
bcc='test@example.com'
)
# With template
SmtpProvider().send(
'test@example.com',
'test@example.com',
{},
template_filename='test-email-template.mako'
)
# With attachments
attachment = io.BytesIO(b'This is test attachment file')
attachment.name = 'path/to/file.txt'
SmtpProvider().send(
'test@example.com',
'test@example.com',
'email body with Attachment',
attachments=[attachment]
)
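# smtp_server() (from restfulpy.testing.mockup) yields (server, bind), where
# bind is the (host, port) the fake SMTP server listens on; pointing
# settings.smtp at it lets SmtpProvider().send run without a real mail server.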
if __name__ == '__main__': # pragma: no cover
unittest.main()
| 26.876712
| 68
| 0.538226
|
794e9550fc58b3830385741ac05094b4a021811d
| 486
|
py
|
Python
|
old_logen/pylogen/logen_dispatch.py
|
leuschel/logen
|
0ea806f54628162615e25177c3ed98f6b2c27935
|
[
"Apache-2.0"
] | 14
|
2015-10-16T11:35:30.000Z
|
2021-05-12T15:31:16.000Z
|
old_logen/pylogen/logen_dispatch.py
|
leuschel/logen
|
0ea806f54628162615e25177c3ed98f6b2c27935
|
[
"Apache-2.0"
] | null | null | null |
old_logen/pylogen/logen_dispatch.py
|
leuschel/logen
|
0ea806f54628162615e25177c3ed98f6b2c27935
|
[
"Apache-2.0"
] | 5
|
2015-10-16T12:44:41.000Z
|
2019-10-02T02:45:38.000Z
|
import os
import development
def get_pylogen_cmd():
if development.get_development():
cmdline = "sicstus -r pylogen_main.sav --goal \"runtime_entry(start),halt.\" -a"
else:
cmdline = os.path.join(".","pylogen")
return cmdline
def build_dispatcher(path,Spec,Memo,File,Output):
cmd = get_pylogen_cmd()
command = "%s -dispatch '%s' '%s' '%s' '%s' '%s'" % (cmd, path,Spec,Memo,File,Output)
    print(command)
os.system(command)
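# Example (hypothetical paths):
#   build_dispatcher('/opt/logen', 'spec.pl', 'memo.pl', 'input.pl', 'out.pl')
# which shells out to:
#   pylogen -dispatch '/opt/logen' 'spec.pl' 'memo.pl' 'input.pl' 'out.pl'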
| 19.44
| 89
| 0.619342
|
794e977690c154aa974b43c0bf271097cf540e0d
| 2,324
|
py
|
Python
|
tg/support/middlewares.py
|
sergiobrr/tg2
|
401d77d82bd9daacb9444150c63bb039bf003436
|
[
"MIT"
] | 812
|
2015-01-16T22:57:52.000Z
|
2022-03-27T04:49:40.000Z
|
tg/support/middlewares.py
|
sergiobrr/tg2
|
401d77d82bd9daacb9444150c63bb039bf003436
|
[
"MIT"
] | 74
|
2015-02-18T17:55:31.000Z
|
2021-12-13T10:41:08.000Z
|
tg/support/middlewares.py
|
sergiobrr/tg2
|
401d77d82bd9daacb9444150c63bb039bf003436
|
[
"MIT"
] | 72
|
2015-06-10T06:02:45.000Z
|
2022-03-27T08:37:24.000Z
|
from tg.request_local import Request, Response
import logging
log = logging.getLogger(__name__)
class SeekableRequestBodyMiddleware(object):
def __init__(self, app):
self.app = app
def _stream_response(self, data):
try:
for chunk in data:
yield chunk
finally:
if hasattr(data, 'close'): # pragma: no cover
data.close()
def __call__(self, environ, start_response):
log.debug("Making request body seekable")
Request(environ).make_body_seekable()
return self._stream_response(self.app(environ, start_response))
class DBSessionRemoverMiddleware(object):
def __init__(self, DBSession, app):
self.app = app
self.DBSession = DBSession
def _stream_response(self, data):
try:
for chunk in data:
yield chunk
finally:
log.debug("Removing DBSession from current thread")
if hasattr(data, 'close'):
data.close()
self.DBSession.remove()
def __call__(self, environ, start_response):
try:
return self._stream_response(self.app(environ, start_response))
except:
log.debug("Removing DBSession from current thread")
self.DBSession.remove()
raise
class MingSessionRemoverMiddleware(object):
def __init__(self, ThreadLocalODMSession, app):
self.app = app
self.ThreadLocalODMSession = ThreadLocalODMSession
def _stream_response(self, data):
try:
for chunk in data:
yield chunk
finally:
log.debug("Removing ThreadLocalODMSession from current thread")
if hasattr(data, 'close'):
data.close()
self.ThreadLocalODMSession.close_all()
def __call__(self, environ, start_response):
try:
return self._stream_response(self.app(environ, start_response))
except:
log.debug("Removing ThreadLocalODMSession from current thread")
self.ThreadLocalODMSession.close_all()
raise
from .statics import StaticsMiddleware
__all__ = ['StaticsMiddleware', 'SeekableRequestBodyMiddleware',
'DBSessionRemoverMiddleware', 'MingSessionRemoverMiddleware']
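# Usage sketch (hypothetical WSGI app): each middleware wraps the app and
# releases its resource once the response iterable is exhausted, e.g.
#   app = SeekableRequestBodyMiddleware(DBSessionRemoverMiddleware(DBSession, app))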
| 30.181818
| 75
| 0.626936
|
794e97b9528374fd9e20a89c87553ac37988de7c
| 14,492
|
py
|
Python
|
src/ptb/main.py
|
richardfat7/enas
|
e830fc1ad50be1824162719f2b005ade08451359
|
[
"Apache-2.0"
] | null | null | null |
src/ptb/main.py
|
richardfat7/enas
|
e830fc1ad50be1824162719f2b005ade08451359
|
[
"Apache-2.0"
] | null | null | null |
src/ptb/main.py
|
richardfat7/enas
|
e830fc1ad50be1824162719f2b005ade08451359
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cPickle as pickle
import shutil
import sys
import time
import numpy as np
import tensorflow as tf
from src import utils
from src.utils import Logger
from src.utils import DEFINE_boolean
from src.utils import DEFINE_float
from src.utils import DEFINE_integer
from src.utils import DEFINE_string
from src.utils import print_user_flags
from src.ptb.ptb_enas_child import PTBEnasChild
from src.ptb.ptb_enas_controller import PTBEnasController
flags = tf.app.flags
FLAGS = flags.FLAGS
DEFINE_boolean("reset_output_dir", False, "Delete output_dir if exists.")
DEFINE_string("data_path", "", "")
DEFINE_string("output_dir", "", "")
DEFINE_string("search_for", None, "[rhn|base|enas]")
DEFINE_string("child_fixed_arc", None, "")
DEFINE_integer("batch_size", 25, "")
DEFINE_integer("child_base_number", 4, "")
DEFINE_integer("child_num_layers", 2, "")
DEFINE_integer("child_bptt_steps", 20, "")
DEFINE_integer("child_lstm_hidden_size", 200, "")
DEFINE_float("child_lstm_e_keep", 1.0, "")
DEFINE_float("child_lstm_x_keep", 1.0, "")
DEFINE_float("child_lstm_h_keep", 1.0, "")
DEFINE_float("child_lstm_o_keep", 1.0, "")
DEFINE_boolean("child_lstm_l_skip", False, "")
DEFINE_float("child_lr", 1.0, "")
DEFINE_float("child_lr_dec_rate", 0.5, "")
DEFINE_float("child_grad_bound", 5.0, "")
DEFINE_float("child_temperature", None, "")
DEFINE_float("child_l2_reg", None, "")
DEFINE_float("child_lr_dec_min", None, "")
DEFINE_float("child_optim_moving_average", None,
"Use the moving average of Variables")
DEFINE_float("child_rnn_l2_reg", None, "")
DEFINE_float("child_rnn_slowness_reg", None, "")
DEFINE_float("child_lr_warmup_val", None, "")
DEFINE_float("child_reset_train_states", None, "")
DEFINE_integer("child_lr_dec_start", 4, "")
DEFINE_integer("child_lr_dec_every", 1, "")
DEFINE_integer("child_avg_pool_size", 1, "")
DEFINE_integer("child_block_size", 1, "")
DEFINE_integer("child_rhn_depth", 4, "")
DEFINE_integer("child_lr_warmup_steps", None, "")
DEFINE_string("child_optim_algo", "sgd", "")
DEFINE_boolean("child_sync_replicas", False, "")
DEFINE_integer("child_num_aggregate", 1, "")
DEFINE_integer("child_num_replicas", 1, "")
DEFINE_float("controller_lr", 1e-3, "")
DEFINE_float("controller_lr_dec_rate", 1.0, "")
DEFINE_float("controller_keep_prob", 0.5, "")
DEFINE_float("controller_l2_reg", 0.0, "")
DEFINE_float("controller_bl_dec", 0.99, "")
DEFINE_float("controller_tanh_constant", None, "")
DEFINE_float("controller_temperature", None, "")
DEFINE_float("controller_entropy_weight", None, "")
DEFINE_float("controller_skip_target", None, "")
DEFINE_float("controller_skip_rate", None, "")
DEFINE_integer("controller_num_aggregate", 1, "")
DEFINE_integer("controller_num_replicas", 1, "")
DEFINE_integer("controller_train_steps", 50, "")
DEFINE_integer("controller_train_every", 2,
"train the controller after how many this number of epochs")
DEFINE_boolean("controller_sync_replicas", False, "To sync or not to sync.")
DEFINE_boolean("controller_training", True, "")
DEFINE_integer("num_epochs", 300, "")
DEFINE_integer("log_every", 50, "How many steps to log")
DEFINE_integer("eval_every_epochs", 1, "How many epochs to eval")
def get_ops(x_train, x_valid, x_test):
"""Create relevant models."""
ops = {}
if FLAGS.search_for == "enas":
assert FLAGS.child_lstm_hidden_size % FLAGS.child_block_size == 0, (
"--child_block_size has to divide child_lstm_hidden_size")
if FLAGS.child_fixed_arc is not None:
assert not FLAGS.controller_training, (
"with --child_fixed_arc, cannot train controller")
child_model = PTBEnasChild(
x_train,
x_valid,
x_test,
rnn_l2_reg=FLAGS.child_rnn_l2_reg,
rnn_slowness_reg=FLAGS.child_rnn_slowness_reg,
rhn_depth=FLAGS.child_rhn_depth,
fixed_arc=FLAGS.child_fixed_arc,
batch_size=FLAGS.batch_size,
bptt_steps=FLAGS.child_bptt_steps,
lstm_num_layers=FLAGS.child_num_layers,
lstm_hidden_size=FLAGS.child_lstm_hidden_size,
lstm_e_keep=FLAGS.child_lstm_e_keep,
lstm_x_keep=FLAGS.child_lstm_x_keep,
lstm_h_keep=FLAGS.child_lstm_h_keep,
lstm_o_keep=FLAGS.child_lstm_o_keep,
lstm_l_skip=FLAGS.child_lstm_l_skip,
vocab_size=10000,
lr_init=FLAGS.child_lr,
lr_dec_start=FLAGS.child_lr_dec_start,
lr_dec_every=FLAGS.child_lr_dec_every,
lr_dec_rate=FLAGS.child_lr_dec_rate,
lr_dec_min=FLAGS.child_lr_dec_min,
lr_warmup_val=FLAGS.child_lr_warmup_val,
lr_warmup_steps=FLAGS.child_lr_warmup_steps,
l2_reg=FLAGS.child_l2_reg,
optim_moving_average=FLAGS.child_optim_moving_average,
clip_mode="global",
grad_bound=FLAGS.child_grad_bound,
optim_algo="sgd",
sync_replicas=FLAGS.child_sync_replicas,
num_aggregate=FLAGS.child_num_aggregate,
num_replicas=FLAGS.child_num_replicas,
temperature=FLAGS.child_temperature,
name="ptb_enas_model")
if FLAGS.child_fixed_arc is None:
controller_model = PTBEnasController(
rhn_depth=FLAGS.child_rhn_depth,
lstm_size=100,
lstm_num_layers=1,
lstm_keep_prob=1.0,
tanh_constant=FLAGS.controller_tanh_constant,
temperature=FLAGS.controller_temperature,
lr_init=FLAGS.controller_lr,
lr_dec_start=0,
lr_dec_every=1000000, # never decrease learning rate
l2_reg=FLAGS.controller_l2_reg,
entropy_weight=FLAGS.controller_entropy_weight,
bl_dec=FLAGS.controller_bl_dec,
optim_algo="adam",
sync_replicas=FLAGS.controller_sync_replicas,
num_aggregate=FLAGS.controller_num_aggregate,
num_replicas=FLAGS.controller_num_replicas)
child_model.connect_controller(controller_model)
controller_model.build_trainer(child_model)
controller_ops = {
"train_step": controller_model.train_step,
"loss": controller_model.loss,
"train_op": controller_model.train_op,
"lr": controller_model.lr,
"grad_norm": controller_model.grad_norm,
"valid_ppl": controller_model.valid_ppl,
"optimizer": controller_model.optimizer,
"baseline": controller_model.baseline,
"ppl": controller_model.ppl,
"reward": controller_model.reward,
"entropy": controller_model.sample_entropy,
"sample_arc": controller_model.sample_arc,
}
else:
child_model.connect_controller(None)
controller_ops = None
else:
raise ValueError("Unknown search_for {}".format(FLAGS.search_for))
child_ops = {
"global_step": child_model.global_step,
"loss": child_model.loss,
"train_op": child_model.train_op,
"train_ppl": child_model.train_ppl,
"train_reset": child_model.train_reset,
"valid_reset": child_model.valid_reset,
"test_reset": child_model.test_reset,
"lr": child_model.lr,
"grad_norm": child_model.grad_norm,
"optimizer": child_model.optimizer,
}
ops = {
"child": child_ops,
"controller": controller_ops,
"num_train_batches": child_model.num_train_batches,
"eval_every": child_model.num_train_batches * FLAGS.eval_every_epochs,
"eval_func": child_model.eval_once,
}
return ops
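# get_ops wires the child PTB model to the (optional) ENAS controller and hands
# train() everything it needs: per-model train tensors plus the eval_every
# interval and eval_func callback used for periodic validation.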
def train(mode="train"):
assert mode in ["train", "eval"], "Unknown mode '{0}'".format(mode)
  with open(FLAGS.data_path, "rb") as finp:
x_train, x_valid, x_test, _, _ = pickle.load(finp)
print("-" * 80)
print("train_size: {0}".format(np.size(x_train)))
print("valid_size: {0}".format(np.size(x_valid)))
print(" test_size: {0}".format(np.size(x_test)))
g = tf.Graph()
with g.as_default():
ops = get_ops(x_train, x_valid, x_test)
child_ops = ops["child"]
controller_ops = ops["controller"]
if FLAGS.child_optim_moving_average is None or mode == "eval":
saver = tf.train.Saver(max_to_keep=10)
else:
saver = child_ops["optimizer"].swapping_saver(max_to_keep=10)
checkpoint_saver_hook = tf.train.CheckpointSaverHook(
FLAGS.output_dir, save_steps=ops["num_train_batches"], saver=saver)
hooks = [checkpoint_saver_hook]
if FLAGS.child_sync_replicas:
sync_replicas_hook = child_ops["optimizer"].make_session_run_hook(True)
hooks.append(sync_replicas_hook)
if FLAGS.controller_training and FLAGS.controller_sync_replicas:
hooks.append(controller_ops["optimizer"].make_session_run_hook(True))
print("-" * 80)
print("Starting session")
config = tf.ConfigProto()
    # restrict TensorFlow to GPUs 1, 2, 4 and 7
    config.gpu_options.visible_device_list = '1,2,4,7'
    # grow GPU allocations on demand, capped at 70% of memory per process
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.7
with tf.train.SingularMonitoredSession(
config=config, hooks=hooks, checkpoint_dir=FLAGS.output_dir) as sess:
start_time = time.time()
if mode == "eval":
sess.run(child_ops["valid_reset"])
ops["eval_func"](sess, "valid", verbose=True)
sess.run(child_ops["test_reset"])
ops["eval_func"](sess, "test", verbose=True)
sys.exit(0)
num_batches = 0
total_tr_ppl = 0
best_valid_ppl = 67.00
while True:
run_ops = [
child_ops["loss"],
child_ops["lr"],
child_ops["grad_norm"],
child_ops["train_ppl"],
child_ops["train_op"],
]
loss, lr, gn, tr_ppl, _ = sess.run(run_ops)
num_batches += 1
total_tr_ppl += loss / FLAGS.child_bptt_steps
global_step = sess.run(child_ops["global_step"])
if FLAGS.child_sync_replicas:
          actual_step = global_step * FLAGS.child_num_aggregate
else:
actual_step = global_step
epoch = actual_step // ops["num_train_batches"]
curr_time = time.time()
if global_step % FLAGS.log_every == 0:
log_string = ""
log_string += "epoch={:<6d}".format(epoch)
log_string += " ch_step={:<6d}".format(global_step)
log_string += " loss={:<8.4f}".format(loss)
log_string += " lr={:<8.4f}".format(lr)
log_string += " |g|={:<10.2f}".format(gn)
log_string += " tr_ppl={:<8.2f}".format(
np.exp(total_tr_ppl / num_batches))
log_string += " mins={:<10.2f}".format(
float(curr_time - start_time) / 60)
print(log_string)
if (FLAGS.child_reset_train_states is not None and
np.random.uniform(0, 1) < FLAGS.child_reset_train_states):
print("reset train states")
sess.run([
child_ops["train_reset"],
child_ops["valid_reset"],
child_ops["test_reset"],
])
if actual_step % ops["eval_every"] == 0:
sess.run([
child_ops["train_reset"],
child_ops["valid_reset"],
child_ops["test_reset"],
])
if (FLAGS.controller_training and
epoch % FLAGS.controller_train_every == 0):
sess.run([
child_ops["train_reset"],
child_ops["valid_reset"],
child_ops["test_reset"],
])
print("Epoch {}: Training controller".format(epoch))
for ct_step in xrange(FLAGS.controller_train_steps *
FLAGS.controller_num_aggregate):
run_ops = [
controller_ops["loss"],
controller_ops["entropy"],
controller_ops["lr"],
controller_ops["grad_norm"],
controller_ops["reward"],
controller_ops["baseline"],
controller_ops["train_op"],
]
loss, entropy, lr, gn, rw, bl, _ = sess.run(run_ops)
controller_step = sess.run(controller_ops["train_step"])
if ct_step % FLAGS.log_every == 0:
curr_time = time.time()
log_string = ""
log_string += "ctrl_step={:<6d}".format(controller_step)
log_string += " loss={:<7.3f}".format(loss)
log_string += " ent={:<5.2f}".format(entropy)
log_string += " lr={:<6.4f}".format(lr)
log_string += " |g|={:<10.7f}".format(gn)
log_string += " rw={:<7.3f}".format(rw)
log_string += " bl={:<7.3f}".format(bl)
log_string += " mins={:<.2f}".format(
float(curr_time - start_time) / 60)
print(log_string)
print("Here are 10 architectures")
for _ in xrange(10):
arc, rw = sess.run([
controller_ops["sample_arc"],
controller_ops["reward"],
])
print("{} rw={:<.3f}".format(np.reshape(arc, [-1]), rw))
sess.run([
child_ops["train_reset"],
child_ops["valid_reset"],
child_ops["test_reset"],
])
print("Epoch {}: Eval".format(epoch))
valid_ppl = ops["eval_func"](sess, "valid")
if valid_ppl < best_valid_ppl:
best_valid_ppl = valid_ppl
sess.run(child_ops["test_reset"])
ops["eval_func"](sess, "test", verbose=True)
sess.run([
child_ops["train_reset"],
child_ops["valid_reset"],
child_ops["test_reset"],
])
total_tr_ppl = 0
num_batches = 0
print("-" * 80)
if epoch >= FLAGS.num_epochs:
ops["eval_func"](sess, "test", verbose=True)
break
def main(_):
print("-" * 80)
if not os.path.isdir(FLAGS.output_dir):
print("Path {} does not exist. Creating.".format(FLAGS.output_dir))
os.makedirs(FLAGS.output_dir)
elif FLAGS.reset_output_dir:
print("Path {} exists. Remove and remake.".format(FLAGS.output_dir))
shutil.rmtree(FLAGS.output_dir)
os.makedirs(FLAGS.output_dir)
print("-" * 80)
log_file = os.path.join(FLAGS.output_dir, "stdout")
print("Logging to {}".format(log_file))
sys.stdout = Logger(log_file)
utils.print_user_flags()
train(mode="train")
if __name__ == "__main__":
tf.app.run()
| 36.23
| 77
| 0.642354
|
794e98703d960c8f9e7efd2722f476e98874392c
| 120,828
|
py
|
Python
|
scripts/linters/pylint_extensions_test.py
|
TheoLipeles/oppia
|
cd0bb873e08fa716014f3d1480fbbfee95b89121
|
[
"Apache-2.0"
] | 2
|
2021-03-07T18:39:15.000Z
|
2021-03-29T20:09:11.000Z
|
scripts/linters/pylint_extensions_test.py
|
TheoLipeles/oppia
|
cd0bb873e08fa716014f3d1480fbbfee95b89121
|
[
"Apache-2.0"
] | null | null | null |
scripts/linters/pylint_extensions_test.py
|
TheoLipeles/oppia
|
cd0bb873e08fa716014f3d1480fbbfee95b89121
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For details on how to write such tests, please refer to
# https://github.com/oppia/oppia/wiki/Writing-Tests-For-Pylint
"""Unit tests for scripts/pylint_extensions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import tempfile
import unittest
import python_utils
from . import pylint_extensions
import astroid # isort:skip
from pylint import testutils # isort:skip
from pylint import lint # isort:skip
from pylint import utils # isort:skip
class ExplicitKeywordArgsCheckerTests(unittest.TestCase):
def setUp(self):
super(ExplicitKeywordArgsCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.ExplicitKeywordArgsChecker)
self.checker_test_object.setup_method()
def test_finds_non_explicit_keyword_args(self):
(
func_call_node_one, func_call_node_two, func_call_node_three,
func_call_node_four, func_call_node_five, func_call_node_six,
class_call_node) = astroid.extract_node(
"""
class TestClass():
pass
def test(test_var_one, test_var_two=4, test_var_three=5,
test_var_four="test_checker"):
test_var_five = test_var_two + test_var_three
return test_var_five
def test_1(test_var_one, test_var_one):
pass
def test_2((a, b)):
pass
test(2, 5, test_var_three=6) #@
test(2) #@
test(2, 6, test_var_two=5, test_var_four="test_checker") #@
max(5, 1) #@
test_1(1, 2) #@
test_2((1, 2)) #@
TestClass() #@
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='non-explicit-keyword-args',
node=func_call_node_one,
args=(
'\'test_var_two\'',
'function',
'test'
)
),
):
self.checker_test_object.checker.visit_call(
func_call_node_one)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(
func_call_node_two)
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='non-explicit-keyword-args',
node=func_call_node_three,
args=(
'\'test_var_three\'',
'function',
'test'
)
)
):
self.checker_test_object.checker.visit_call(
func_call_node_three)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(class_call_node)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(func_call_node_four)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(func_call_node_five)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(func_call_node_six)
def test_finds_arg_name_for_non_keyword_arg(self):
node_arg_name_for_non_keyword_arg = astroid.extract_node(
"""
def test(test_var_one, test_var_two=4, test_var_three=5):
test_var_five = test_var_two + test_var_three
return test_var_five
test(test_var_one=2, test_var_two=5) #@
""")
message = testutils.Message(
msg_id='arg-name-for-non-keyword-arg',
node=node_arg_name_for_non_keyword_arg,
args=('\'test_var_one\'', 'function', 'test'))
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_call(
node_arg_name_for_non_keyword_arg)
def test_correct_use_of_keyword_args(self):
node_with_no_error_message = astroid.extract_node(
"""
def test(test_var_one, test_var_two=4, test_var_three=5):
test_var_five = test_var_two + test_var_three
return test_var_five
test(2, test_var_two=2) #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(
node_with_no_error_message)
def test_function_with_args_and_kwargs(self):
node_with_args_and_kwargs = astroid.extract_node(
"""
def test_1(*args, **kwargs):
pass
test_1(first=1, second=2) #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(
node_with_args_and_kwargs)
def test_constructor_call_with_keyword_arguments(self):
node_with_no_error_message = astroid.extract_node(
"""
class TestClass():
def __init__(self, first, second):
pass
TestClass(first=1, second=2) #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_call(
node_with_no_error_message)
def test_register(self):
pylinter_instance = lint.PyLinter()
pylint_extensions.register(pylinter_instance)
class HangingIndentCheckerTests(unittest.TestCase):
def setUp(self):
super(HangingIndentCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.HangingIndentChecker)
self.checker_test_object.setup_method()
def test_no_break_after_hanging_indentation(self):
node_break_after_hanging_indent = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""self.post_json('/ml/\\trainedclassifierhandler',
self.payload, expect_errors=True, expected_status_int=401)
if (a > 1 and
b > 2):
""")
node_break_after_hanging_indent.file = filename
node_break_after_hanging_indent.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_break_after_hanging_indent))
message = testutils.Message(
msg_id='no-break-after-hanging-indent', line=1)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_no_break_after_hanging_indentation_with_comment(self):
node_break_after_hanging_indent = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""self.post_json('/ml/\\trainedclassifierhandler',
self.payload, expect_errors=True, expected_status_int=401)
if (a > 1 and
b > 2): # pylint: disable=invalid-name
""")
node_break_after_hanging_indent.file = filename
node_break_after_hanging_indent.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_break_after_hanging_indent))
message = testutils.Message(
msg_id='no-break-after-hanging-indent', line=1)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_break_after_hanging_indentation(self):
node_with_no_error_message = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""\"\"\"Some multiline
docstring.
\"\"\"
# Load JSON.
master_translation_dict = json.loads(
utils.get_file_contents(os.path.join(
os.getcwd(), 'assets', 'i18n', 'en.json')))
""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_with_no_error_message))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_hanging_indentation_with_a_comment_after_bracket(self):
node_with_no_error_message = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""self.post_json( # Random comment
'(',
self.payload, expect_errors=True, expected_status_int=401)""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_with_no_error_message))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_hanging_indentation_with_a_comment_after_two_or_more_bracket(self):
node_with_no_error_message = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""self.post_json(func( # Random comment
'(',
self.payload, expect_errors=True, expected_status_int=401))""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_with_no_error_message))
with self.checker_test_object.assertNoMessages():
temp_file.close()
class DocstringParameterCheckerTests(unittest.TestCase):
def setUp(self):
super(DocstringParameterCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.DocstringParameterChecker)
self.checker_test_object.setup_method()
def test_no_newline_below_class_docstring(self):
node_no_newline_below_class_docstring = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(dummy_class):
\"\"\"This is a docstring.\"\"\"
a = 1 + 2
""")
node_no_newline_below_class_docstring.file = filename
node_no_newline_below_class_docstring.path = filename
self.checker_test_object.checker.visit_classdef(
node_no_newline_below_class_docstring)
message = testutils.Message(
msg_id='newline-below-class-docstring',
node=node_no_newline_below_class_docstring)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_excessive_newline_below_class_docstring(self):
node_excessive_newline_below_class_docstring = (
astroid.scoped_nodes.Module(
name='test',
doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(dummy_class):
\"\"\"This is a docstring.\"\"\"
a = 1 + 2
""")
node_excessive_newline_below_class_docstring.file = filename
node_excessive_newline_below_class_docstring.path = filename
self.checker_test_object.checker.visit_classdef(
node_excessive_newline_below_class_docstring)
message = testutils.Message(
msg_id='newline-below-class-docstring',
node=node_excessive_newline_below_class_docstring)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_inline_comment_after_class_docstring(self):
node_inline_comment_after_class_docstring = (
astroid.scoped_nodes.Module(
name='test',
doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(dummy_class):
\"\"\"This is a docstring.\"\"\"
# This is a comment.
def func():
a = 1 + 2
""")
node_inline_comment_after_class_docstring.file = filename
node_inline_comment_after_class_docstring.path = filename
self.checker_test_object.checker.visit_classdef(
node_inline_comment_after_class_docstring)
message = testutils.Message(
msg_id='newline-below-class-docstring',
node=node_inline_comment_after_class_docstring)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_multiline_class_argument_with_incorrect_style(self):
node_multiline_class_argument_with_incorrect_style = (
astroid.scoped_nodes.Module(
name='test',
doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(
dummy_class):
\"\"\"This is a docstring.\"\"\"
a = 1 + 2
""")
node_multiline_class_argument_with_incorrect_style.file = filename
node_multiline_class_argument_with_incorrect_style.path = filename
self.checker_test_object.checker.visit_classdef(
node_multiline_class_argument_with_incorrect_style)
message = testutils.Message(
msg_id='newline-below-class-docstring',
node=node_multiline_class_argument_with_incorrect_style)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_multiline_class_argument_with_correct_style(self):
node_multiline_class_argument_with_correct_style = (
astroid.scoped_nodes.Module(
name='test',
doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(
dummy_class):
\"\"\"This is a docstring.\"\"\"
a = 1 + 2
""")
node_multiline_class_argument_with_correct_style.file = filename
node_multiline_class_argument_with_correct_style.path = filename
self.checker_test_object.checker.visit_classdef(
node_multiline_class_argument_with_correct_style)
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_single_newline_below_class_docstring(self):
node_with_no_error_message = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(dummy_class):
\"\"\"This is a multiline docstring.\"\"\"
a = 1 + 2
""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
self.checker_test_object.checker.visit_classdef(
node_with_no_error_message)
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_class_with_no_docstring(self):
node_class_with_no_docstring = astroid.scoped_nodes.Module(
name='test',
doc=None)
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(dummy_class):
a = 1 + 2
""")
node_class_with_no_docstring.file = filename
node_class_with_no_docstring.path = filename
self.checker_test_object.checker.visit_classdef(
node_class_with_no_docstring)
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_newline_before_docstring_with_correct_style(self):
node_newline_before_docstring_with_correct_style = (
astroid.scoped_nodes.Module(
name='test',
doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(dummy_class):
\"\"\"This is a multiline docstring.\"\"\"
a = 1 + 2
""")
node_newline_before_docstring_with_correct_style.file = filename
node_newline_before_docstring_with_correct_style.path = filename
self.checker_test_object.checker.visit_classdef(
node_newline_before_docstring_with_correct_style)
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_newline_before_docstring_with_incorrect_style(self):
node_newline_before_docstring_with_incorrect_style = (
astroid.scoped_nodes.Module(
name='test',
doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
class ClassName(dummy_class):
\"\"\"This is a multiline docstring.\"\"\"
a = 1 + 2
""")
node_newline_before_docstring_with_incorrect_style.file = filename
node_newline_before_docstring_with_incorrect_style.path = filename
self.checker_test_object.checker.visit_classdef(
node_newline_before_docstring_with_incorrect_style)
message = testutils.Message(
msg_id='newline-below-class-docstring',
node=node_newline_before_docstring_with_incorrect_style)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_malformed_args_section(self):
node_malformed_args_section = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Does nothing.
Args:
arg: Argument description.
\"\"\"
a = True
""")
message = testutils.Message(
msg_id='malformed-args-section',
node=node_malformed_args_section
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_malformed_args_section)
def test_malformed_returns_section(self):
node_malformed_returns_section = astroid.extract_node(
u"""def func(): #@
\"\"\"Return True.
Returns:
arg: Argument description.
\"\"\"
return True
""")
message = testutils.Message(
msg_id='malformed-returns-section',
node=node_malformed_returns_section
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_malformed_returns_section)
def test_malformed_yields_section(self):
node_malformed_yields_section = astroid.extract_node(
u"""def func(): #@
\"\"\"Yield true.
Yields:
yields: Argument description.
\"\"\"
yield True
""")
message = testutils.Message(
msg_id='malformed-yields-section',
node=node_malformed_yields_section
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_malformed_yields_section)
def test_malformed_raises_section(self):
node_malformed_raises_section = astroid.extract_node(
u"""def func(): #@
\"\"\"Raise an exception.
Raises:
Exception: Argument description.
\"\"\"
raise Exception()
""")
message = testutils.Message(
msg_id='malformed-raises-section',
node=node_malformed_raises_section
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_malformed_raises_section)
def test_malformed_args_argument(self):
node_malformed_args_argument = astroid.extract_node(
u"""def func(*args): #@
\"\"\"Does nothing.
Args:
*args: int. Argument description.
\"\"\"
a = True
""")
message = testutils.Message(
msg_id='malformed-args-argument',
node=node_malformed_args_argument
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_malformed_args_argument)
def test_well_formated_args_argument(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(*args): #@
\"\"\"Does nothing.
Args:
*args: list(*). Description.
\"\"\"
a = True
""")
with self.checker_test_object.assertAddsMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
def test_well_formated_args_section(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Does nothing.
Args:
arg: argument. Description.
\"\"\"
a = True
""")
with self.checker_test_object.assertAddsMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
def test_well_formated_returns_section(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(): #@
\"\"\"Does nothing.
Returns:
                int. Argument description.
\"\"\"
return args
""")
with self.checker_test_object.assertAddsMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
def test_well_formated_yields_section(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(): #@
\"\"\"Does nothing.
Yields:
arg. Argument description.
\"\"\"
yield args
""")
with self.checker_test_object.assertAddsMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
def test_space_after_docstring(self):
node_space_after_docstring = astroid.extract_node(
u"""def func():
\"\"\" Hello world.\"\"\"
Something
""")
message = testutils.Message(
msg_id='space-after-triple-quote',
node=node_space_after_docstring)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_space_after_docstring)
def test_two_lines_empty_docstring_raise_correct_message(self):
node_with_docstring = astroid.extract_node(
u"""def func():
\"\"\"
\"\"\"
pass
""")
message = testutils.Message(
msg_id='single-line-docstring-span-two-lines',
node=node_with_docstring)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_with_docstring)
def test_single_line_docstring_span_two_lines(self):
node_single_line_docstring_span_two_lines = astroid.extract_node(
u"""def func(): #@
\"\"\"This is a docstring.
\"\"\"
Something
""")
message = testutils.Message(
msg_id='single-line-docstring-span-two-lines',
node=node_single_line_docstring_span_two_lines)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_single_line_docstring_span_two_lines)
def test_no_period_at_end(self):
node_no_period_at_end = astroid.extract_node(
u"""def func(): #@
\"\"\"This is a docstring\"\"\"
Something
""")
message = testutils.Message(
msg_id='no-period-used',
node=node_no_period_at_end)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_no_period_at_end)
def test_empty_line_before_end_of_docstring(self):
node_empty_line_before_end = astroid.extract_node(
u"""def func(): #@
\"\"\"This is a docstring.
\"\"\"
Something
""")
message = testutils.Message(
msg_id='empty-line-before-end', node=node_empty_line_before_end)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_empty_line_before_end)
def test_no_period_at_end_of_a_multiline_docstring(self):
node_no_period_at_end = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Args:
                arg: variable. Description
\"\"\"
Something
""")
no_period_at_end_message = testutils.Message(
msg_id='no-period-used', node=node_no_period_at_end)
malformed_args_message = testutils.Message(
msg_id='malformed-args-section', node=node_no_period_at_end)
with self.checker_test_object.assertAddsMessages(
no_period_at_end_message, malformed_args_message):
self.checker_test_object.checker.visit_functiondef(
node_no_period_at_end)
def test_no_newline_at_end_of_multi_line_docstring(self):
node_no_newline_at_end = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Args:
arg: variable. Description.\"\"\"
Something
""")
message = testutils.Message(
msg_id='no-newline-used-at-end', node=node_no_newline_at_end)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_no_newline_at_end)
def test_no_newline_above_args(self):
node_single_newline_above_args = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Do something.
Args:
arg: argument. Description.
\"\"\"
""")
message = testutils.Message(
msg_id='single-space-above-args',
node=node_single_newline_above_args)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_single_newline_above_args)
def test_no_newline_above_raises(self):
node_single_newline_above_raises = astroid.extract_node(
u"""def func(): #@
\"\"\"Raises exception.
Raises:
raises_exception. Description.
\"\"\"
raise exception
""")
message = testutils.Message(
msg_id='single-space-above-raises',
node=node_single_newline_above_raises
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_single_newline_above_raises)
def test_no_newline_above_return(self):
node_with_no_space_above_return = astroid.extract_node(
u"""def func(): #@
\"\"\"Returns something.
Returns:
returns_something. Description.
\"\"\"
return something
""")
message = testutils.Message(
msg_id='single-space-above-returns',
node=node_with_no_space_above_return
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_with_no_space_above_return)
def test_varying_combination_of_newline_above_args(self):
node_newline_above_args_raises = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Raises exception.
Args:
arg: argument. Description.
Raises:
raises_something. Description.
\"\"\"
raise exception
""")
message = testutils.Message(
msg_id='single-space-above-raises',
node=node_newline_above_args_raises
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_newline_above_args_raises)
node_newline_above_args_returns = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Returns Something.
Args:
arg: argument. Description.
Returns:
returns_something. Description.
\"\"\"
return something
""")
message = testutils.Message(
msg_id='single-space-above-returns',
node=node_newline_above_args_returns
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_newline_above_args_returns)
node_newline_above_returns_raises = astroid.extract_node(
u"""def func(): #@
\"\"\"Do something.
Raises:
raises_exception. Description.
Returns:
returns_something. Description.
\"\"\"
raise something
return something
""")
message = testutils.Message(
msg_id='single-space-above-raises',
node=node_newline_above_returns_raises
)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
node_newline_above_returns_raises)
def test_excessive_newline_above_args(self):
node_with_two_newline = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Returns something.
Args:
arg: argument. This is description.
Returns:
int. Returns something.
Yields:
yield_something. Description.
\"\"\"
return True
yield something
""")
single_space_above_args_message = testutils.Message(
msg_id='single-space-above-args',
node=node_with_two_newline
)
single_space_above_returns_message = testutils.Message(
msg_id='single-space-above-returns',
node=node_with_two_newline
)
single_space_above_yields_message = testutils.Message(
msg_id='single-space-above-yield',
node=node_with_two_newline
)
with self.checker_test_object.assertAddsMessages(
single_space_above_args_message, single_space_above_returns_message,
single_space_above_yields_message):
self.checker_test_object.checker.visit_functiondef(
node_with_two_newline)
def test_return_in_comment(self):
node_with_return_in_comment = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Returns something.
Args:
arg: argument. Description.
Returns:
returns_something. Description.
\"\"\"
"Returns: something"
return something
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_return_in_comment)
def test_function_with_no_args(self):
node_with_no_args = astroid.extract_node(
u"""def func():
\"\"\"Do something.\"\"\"
a = 1 + 2
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_args)
def test_well_placed_newline(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(arg): #@
\"\"\"Returns something.
Args:
arg: argument. This is description.
Returns:
returns_something. This is description.
Raises:
raises. Something.
Yields:
yield_something. This is description.
\"\"\"
raise something
yield something
return something
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
def test_invalid_parameter_indentation_in_docstring(self):
raises_invalid_indentation_node = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Raises:
NoVariableException. Variable.
\"\"\"
Something
""")
message = testutils.Message(
msg_id='4-space-indentation-in-docstring',
node=raises_invalid_indentation_node)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
raises_invalid_indentation_node)
return_invalid_indentation_node = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Returns:
str. If :true,
individual key=value pairs.
\"\"\"
Something
""")
message = testutils.Message(
msg_id='4-space-indentation-in-docstring',
node=return_invalid_indentation_node)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
return_invalid_indentation_node)
def test_invalid_description_indentation_docstring(self):
invalid_raises_description_indentation_node = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Raises:
AssertionError. If the
schema is not valid.
\"\"\"
Something
""")
incorrect_indentation_message = testutils.Message(
msg_id='8-space-indentation-in-docstring',
node=invalid_raises_description_indentation_node)
malformed_raises_message = testutils.Message(
msg_id='malformed-raises-section',
node=invalid_raises_description_indentation_node)
with self.checker_test_object.assertAddsMessages(
incorrect_indentation_message, malformed_raises_message,
malformed_raises_message):
self.checker_test_object.checker.visit_functiondef(
invalid_raises_description_indentation_node)
invalid_return_description_indentation_node = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Returns:
str. If :true,
individual key=value pairs.
\"\"\"
return Something
""")
message = testutils.Message(
msg_id='4-space-indentation-in-docstring',
node=invalid_return_description_indentation_node)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
invalid_return_description_indentation_node)
invalid_yield_description_indentation_node = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Yields:
str. If :true,
incorrect indentation line.
\"\"\"
yield Something
""")
message = testutils.Message(
msg_id='4-space-indentation-in-docstring',
node=invalid_yield_description_indentation_node)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_functiondef(
invalid_yield_description_indentation_node)
def test_malformed_parameter_docstring(self):
invalid_parameter_name = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Raises:
Incorrect-Exception. If the
schema is not valid.
\"\"\"
Something
""")
malformed_raises_message = testutils.Message(
msg_id='malformed-raises-section',
node=invalid_parameter_name)
with self.checker_test_object.assertAddsMessages(
malformed_raises_message, malformed_raises_message):
self.checker_test_object.checker.visit_functiondef(
invalid_parameter_name)
def test_well_formed_single_line_docstring(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.\"\"\"
Something
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
def test_well_formed_multi_line_docstring(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Args:
arg: variable. Description.
\"\"\"
Something
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
def test_well_formed_multi_line_description_docstring(self):
node_with_no_error_message = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Args:
arg: bool. If true, individual key=value
pairs separated by '&' are
generated for each element of the value
sequence for the key.
\"\"\"
Something
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
node_with_no_error_message = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Raises:
doseq. If true, individual
key=value pairs separated by '&' are
generated for each element of
the value sequence for the key
temp temp temp temp.
query. The query to be encoded.
\"\"\"
Something
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
node_with_no_error_message = astroid.extract_node(
u"""def func(arg):
\"\"\"This is a docstring.
Returns:
str. The string parsed using
Jinja templating. Returns an error
string in case of error in parsing.
Yields:
tuple. For ExplorationStatsModel,
a 2-tuple of the form (exp_id, value)
where value is of the form.
\"\"\"
if True:
return Something
else:
yield something
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
node_with_no_error_message = astroid.extract_node(
u"""def func(arg): #@
\"\"\"This is a docstring.
Returns:
str. From this item there
is things:
Jinja templating. Returns an error
string in case of error in parsing.
Yields:
tuple. For ExplorationStatsModel:
{key
(sym)
}.
\"\"\"
if True:
return Something
else:
yield (a, b)
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
node_with_no_error_message)
def test_checks_args_formatting_docstring(self):
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.DocstringParameterChecker)
self.checker_test_object.setup_method()
invalid_args_description_node = astroid.extract_node(
"""
def func(test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Incorrect description indentation
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='8-space-indentation-for-arg-in-descriptions-doc',
node=invalid_args_description_node,
args='Incorrect'
),
testutils.Message(
msg_id='malformed-args-section',
node=invalid_args_description_node,
)
):
self.checker_test_object.checker.visit_functiondef(
invalid_args_description_node)
invalid_param_indentation_node = astroid.extract_node(
"""
def func(test_var_one): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='4-space-indentation-for-arg-parameters-doc',
node=invalid_param_indentation_node,
args='test_var_one:'
),
):
self.checker_test_object.checker.visit_functiondef(
invalid_param_indentation_node)
invalid_header_indentation_node = astroid.extract_node(
"""
def func(test_var_one): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='incorrect-indentation-for-arg-header-doc',
node=invalid_header_indentation_node,
),
):
self.checker_test_object.checker.visit_functiondef(
invalid_header_indentation_node)
def test_correct_args_formatting_docstring(self):
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.DocstringParameterChecker)
self.checker_test_object.setup_method()
valid_free_form_node = astroid.extract_node(
"""
def func(test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable:
Incorrect description indentation
{
key:
}.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
valid_free_form_node)
valid_indentation_node = astroid.extract_node(
"""
def func(test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable:
Correct indentation.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(
valid_indentation_node)
def test_finds_docstring_parameter(self):
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.DocstringParameterChecker)
self.checker_test_object.setup_method()
valid_func_node, valid_return_node = astroid.extract_node(
"""
def test(test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(valid_func_node)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_return(valid_return_node)
valid_func_node, valid_yield_node = astroid.extract_node(
"""
def test(test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.\"\"\"
result = test_var_one + test_var_two
yield result #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(valid_func_node)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_yield(valid_yield_node)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_return(valid_yield_node)
(
missing_yield_type_func_node,
missing_yield_type_yield_node) = astroid.extract_node(
"""
class Test(python_utils.OBJECT):
def __init__(self, test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
yield result #@
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='redundant-returns-doc',
node=missing_yield_type_func_node
),
):
self.checker_test_object.checker.visit_functiondef(
missing_yield_type_func_node)
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='missing-yield-doc',
node=missing_yield_type_func_node
), testutils.Message(
msg_id='missing-yield-type-doc',
node=missing_yield_type_func_node
),
):
self.checker_test_object.checker.visit_yieldfrom(
missing_yield_type_yield_node)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_return(
missing_yield_type_yield_node)
(
missing_return_type_func_node,
missing_return_type_return_node) = astroid.extract_node(
"""
class Test(python_utils.OBJECT):
def __init__(self, test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Yields:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result #@
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='redundant-yields-doc',
node=missing_return_type_func_node
),
):
self.checker_test_object.checker.visit_functiondef(
missing_return_type_func_node)
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='missing-return-doc',
node=missing_return_type_func_node
), testutils.Message(
msg_id='missing-return-type-doc',
node=missing_return_type_func_node
),
):
self.checker_test_object.checker.visit_return(
missing_return_type_return_node)
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_yield(
missing_return_type_return_node)
valid_raise_node = astroid.extract_node(
"""
def func(test_var_one, test_var_two):
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Raises:
Exception. An exception.
\"\"\"
raise Exception #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
(
missing_raise_type_func_node,
missing_raise_type_raise_node) = astroid.extract_node(
"""
def func(test_var_one, test_var_two): #@
\"\"\"Function to test raising exceptions.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
\"\"\"
raise Exception #@
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='missing-raises-doc',
args=('Exception',),
node=missing_raise_type_func_node
),
):
self.checker_test_object.checker.visit_raise(
missing_raise_type_raise_node)
valid_raise_node = astroid.extract_node(
"""
class Test(python_utils.OBJECT):
raise Exception #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_raise_node = astroid.extract_node(
"""
class Test():
@property
def decorator_func(self):
pass
@decorator_func.setter
@property
def func(self):
raise Exception #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_raise_node = astroid.extract_node(
"""
class Test():
def func(self):
raise Exception #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_raise_node = astroid.extract_node(
"""
def func():
try:
raise Exception #@
except Exception:
pass
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_raise_node = astroid.extract_node(
"""
def func():
\"\"\"Function to test raising exceptions.\"\"\"
raise Exception #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_raise_node = astroid.extract_node(
"""
def my_func(self):
\"\"\"This is a docstring.
:raises NameError: Never.
\"\"\"
def ex_func(val):
return RuntimeError(val)
raise ex_func('hi') #@
raise NameError('hi')
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_raise_node = astroid.extract_node(
"""
from unknown import Unknown
def my_func(self):
\"\"\"This is a docstring.
:raises NameError: Never.
\"\"\"
raise Unknown('hi') #@
raise NameError('hi')
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_raise_node = astroid.extract_node(
"""
def my_func(self):
\"\"\"This is a docstring.
:raises NameError: Never.
\"\"\"
def ex_func(val):
def inner_func(value):
return OSError(value)
return RuntimeError(val)
raise ex_func('hi') #@
raise NameError('hi')
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_raise(valid_raise_node)
valid_return_node = astroid.extract_node(
"""
def func():
\"\"\"Function to test return values.\"\"\"
return None #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_return(valid_return_node)
valid_return_node = astroid.extract_node(
"""
def func():
\"\"\"Function to test return values.\"\"\"
return #@
""")
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_return(valid_return_node)
missing_param_func_node = astroid.extract_node(
"""
def func(test_var_one, test_var_two, *args, **kwargs): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='missing-param-doc',
node=missing_param_func_node,
args=('args, kwargs',),
),
):
self.checker_test_object.checker.visit_functiondef(
missing_param_func_node)
missing_param_func_node = astroid.extract_node(
"""
def func(test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
invalid_var_name: str. Second test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='missing-param-doc',
node=missing_param_func_node,
args=('test_var_two',),
), testutils.Message(
msg_id='missing-type-doc',
node=missing_param_func_node,
args=('test_var_two',),
), testutils.Message(
msg_id='differing-param-doc',
node=missing_param_func_node,
args=('invalid_var_name',),
), testutils.Message(
msg_id='differing-type-doc',
node=missing_param_func_node,
args=('invalid_var_name',),
),
testutils.Message(
msg_id='8-space-indentation-for-arg-in-descriptions-doc',
node=missing_param_func_node,
args='invalid_var_name:'
),
):
self.checker_test_object.checker.visit_functiondef(
missing_param_func_node)
class_node, multiple_constructor_func_node = astroid.extract_node(
"""
class Test(): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Returns:
int. The test result.
\"\"\"
def __init__(self, test_var_one, test_var_two): #@
\"\"\"Function to test docstring parameters.
Args:
test_var_one: int. First test variable.
test_var_two: str. Second test variable.
Returns:
int. The test result.
\"\"\"
result = test_var_one + test_var_two
return result
""")
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='multiple-constructor-doc',
node=class_node,
args=(class_node.name,),
),
):
self.checker_test_object.checker.visit_functiondef(
multiple_constructor_func_node)
def test_visit_raise_warns_unknown_style(self):
self.checker_test_object.checker.config.accept_no_raise_doc = False
node = astroid.extract_node(
"""
def my_func(self):
\"\"\"This is a docstring.\"\"\"
raise RuntimeError('hi')
""")
raise_node = node.body[0]
func_node = raise_node.frame()
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='missing-raises-doc',
args=('RuntimeError',),
node=func_node
),
):
self.checker_test_object.checker.visit_raise(raise_node)
class ImportOnlyModulesCheckerTests(unittest.TestCase):
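"""Unit tests for ImportOnlyModulesChecker."""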
def test_finds_import_from(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
pylint_extensions.ImportOnlyModulesChecker)
checker_test_object.setup_method()
importfrom_node1 = astroid.extract_node(
"""
from os import path #@
import sys
""")
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_importfrom(importfrom_node1)
importfrom_node2 = astroid.extract_node(
"""
from os import error #@
import sys
""")
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='import-only-modules',
node=importfrom_node2,
args=('error', 'os')
),
):
checker_test_object.checker.visit_importfrom(
importfrom_node2)
importfrom_node3 = astroid.extract_node(
"""
from invalid_module import invalid_module #@
""")
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_importfrom(importfrom_node3)
importfrom_node4 = astroid.extract_node(
"""
from constants import constants #@
""")
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_importfrom(importfrom_node4)
importfrom_node5 = astroid.extract_node(
"""
from os import invalid_module #@
""")
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='import-only-modules',
node=importfrom_node5,
args=('invalid_module', 'os')
),
):
checker_test_object.checker.visit_importfrom(importfrom_node5)
importfrom_node6 = astroid.extract_node(
"""
from .constants import constants #@
""", module_name='.constants')
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_importfrom(importfrom_node6)
class BackslashContinuationCheckerTests(unittest.TestCase):
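"""Unit tests for BackslashContinuationChecker."""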
def test_finds_backslash_continuation(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
pylint_extensions.BackslashContinuationChecker)
checker_test_object.setup_method()
node = astroid.scoped_nodes.Module(name='test', doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""message1 = 'abc'\\\n""" # pylint: disable=backslash-continuation
"""'cde'\\\n""" # pylint: disable=backslash-continuation
"""'xyz'
message2 = 'abc\\\\'
message3 = (
'abc\\\\'
'xyz\\\\'
)
""")
node.file = filename
node.path = filename
checker_test_object.checker.process_module(node)
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='backslash-continuation',
line=1
),
testutils.Message(
msg_id='backslash-continuation',
line=2
),
):
temp_file.close()
class FunctionArgsOrderCheckerTests(unittest.TestCase):
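"""Unit tests for FunctionArgsOrderChecker."""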
def test_finds_function_def(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
pylint_extensions.FunctionArgsOrderChecker)
checker_test_object.setup_method()
functiondef_node1 = astroid.extract_node(
"""
def test(self,test_var_one, test_var_two): #@
result = test_var_one + test_var_two
return result
""")
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_functiondef(functiondef_node1)
functiondef_node2 = astroid.extract_node(
"""
def test(test_var_one, test_var_two, self): #@
result = test_var_one + test_var_two
return result
""")
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='function-args-order-self',
node=functiondef_node2
),
):
checker_test_object.checker.visit_functiondef(functiondef_node2)
functiondef_node3 = astroid.extract_node(
"""
def test(test_var_one, test_var_two, cls): #@
result = test_var_one + test_var_two
return result
""")
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='function-args-order-cls',
node=functiondef_node3
),
):
checker_test_object.checker.visit_functiondef(functiondef_node3)
class RestrictedImportCheckerTests(unittest.TestCase):
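"""Unit tests for RestrictedImportChecker."""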
def setUp(self):
super(RestrictedImportCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.RestrictedImportChecker)
self.checker_test_object.setup_method()
# The spaces are included on purpose so that we properly test
# the input sanitization.
self.checker_test_object.checker.config.forbidden_imports = (
' core.storage: core.domain ',
'core.domain : core.controllers',
'core.controllers: core.platform | core.storage '
)
self.checker_test_object.checker.open()
def test_forbid_domain_import_in_storage_module(self):
node_err_import = astroid.extract_node(
"""
import core.domain.activity_domain #@
""")
node_err_import.root().name = 'oppia.core.storage.topic'
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='invalid-import',
node=node_err_import,
args=('domain', 'storage'),
),
):
self.checker_test_object.checker.visit_import(node_err_import)
def test_allow_platform_import_in_storage_module(self):
node_no_err_import = astroid.extract_node(
"""
import core.platform.email.mailgun_email_services #@
""")
node_no_err_import.root().name = 'oppia.core.storage.topic'
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_import(node_no_err_import)
def test_forbid_domain_from_import_in_storage_module(self):
node_err_importfrom = astroid.extract_node(
"""
from core.domain import activity_domain #@
""")
node_err_importfrom.root().name = 'oppia.core.storage.topic'
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='invalid-import',
node=node_err_importfrom,
args=('domain', 'storage'),
)
):
self.checker_test_object.checker.visit_importfrom(
node_err_importfrom)
def test_allow_platform_from_import_in_storage_module(self):
node_no_err_importfrom = astroid.extract_node(
"""
from core.platform.email import mailgun_email_services #@
""")
node_no_err_importfrom.root().name = 'oppia.core.storage.topic'
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_importfrom(
node_no_err_importfrom)
def test_forbid_controllers_import_in_domain_module(self):
node_err_import = astroid.extract_node(
"""
import core.controllers.acl_decorators #@
""")
node_err_import.root().name = 'oppia.core.domain'
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='invalid-import',
node=node_err_import,
args=('controllers', 'domain'),
),
):
self.checker_test_object.checker.visit_import(node_err_import)
def test_allow_platform_import_in_domain_module(self):
node_no_err_import = astroid.extract_node(
"""
import core.platform.email.mailgun_email_services_test #@
""")
node_no_err_import.root().name = 'oppia.core.domain'
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_import(node_no_err_import)
def test_forbid_controllers_from_import_in_domain_module(self):
node_err_importfrom = astroid.extract_node(
"""
from core.controllers import acl_decorators #@
""")
node_err_importfrom.root().name = 'oppia.core.domain'
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='invalid-import',
node=node_err_importfrom,
args=('controllers', 'domain'),
)
):
self.checker_test_object.checker.visit_importfrom(
node_err_importfrom)
def test_allow_platform_from_import_in_domain_module(self):
node_no_err_importfrom = astroid.extract_node(
"""
from core.platform.email import mailgun_email_services_test #@
""")
node_no_err_importfrom.root().name = 'oppia.core.domain'
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_importfrom(
node_no_err_importfrom)
def test_forbid_platform_import_in_controllers_module(self):
node_err_import = astroid.extract_node(
"""
import core.platform #@
""")
node_err_import.root().name = 'oppia.core.controllers.controller'
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='invalid-import',
node=node_err_import,
args=('platform', 'controllers'),
)
):
self.checker_test_object.checker.visit_import(node_err_import)
def test_forbid_storage_import_in_controllers_module(self):
node_err_import = astroid.extract_node(
"""
import core.storage #@
""")
node_err_import.root().name = 'oppia.core.controllers.controller'
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='invalid-import',
node=node_err_import,
args=('storage', 'controllers'),
)
):
self.checker_test_object.checker.visit_import(node_err_import)
def test_allow_domain_import_in_controllers_module(self):
node_no_err_import = astroid.extract_node(
"""
import core.domain #@
""")
node_no_err_import.root().name = 'oppia.core.controllers.controller'
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_import(node_no_err_import)
def test_forbid_platform_from_import_in_controllers_module(self):
node_err_importfrom = astroid.extract_node(
"""
from core.platform import models #@
""")
node_err_importfrom.root().name = 'oppia.core.controllers.controller'
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='invalid-import',
node=node_err_importfrom,
args=('platform', 'controllers'),
)
):
self.checker_test_object.checker.visit_importfrom(
node_err_importfrom)
def test_forbid_storage_from_import_in_controllers_module(self):
node_err_importfrom = astroid.extract_node(
"""
from core.storage.user import gae_models as user_models #@
""")
node_err_importfrom.root().name = 'oppia.core.controllers.controller'
with self.checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='invalid-import',
node=node_err_importfrom,
args=('storage', 'controllers'),
)
):
self.checker_test_object.checker.visit_importfrom(
node_err_importfrom)
def test_allow_domain_from_import_in_controllers_module(self):
node_no_err_importfrom = astroid.extract_node(
"""
from core.domain import user_services #@
""")
node_no_err_importfrom.root().name = 'oppia.core.controllers.controller'
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_importfrom(
node_no_err_importfrom)
class SingleCharAndNewlineAtEOFCheckerTests(unittest.TestCase):
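"""Unit tests for SingleCharAndNewlineAtEOFChecker."""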
def test_checks_single_char_and_newline_eof(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
pylint_extensions.SingleCharAndNewlineAtEOFChecker)
checker_test_object.setup_method()
node_missing_newline_at_eof = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""c = 'something dummy'
""")
node_missing_newline_at_eof.file = filename
node_missing_newline_at_eof.path = filename
checker_test_object.checker.process_module(node_missing_newline_at_eof)
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='newline-at-eof',
line=2
),
):
temp_file.close()
node_single_char_file = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(u"""1""")
node_single_char_file.file = filename
node_single_char_file.path = filename
checker_test_object.checker.process_module(node_single_char_file)
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='only-one-character',
line=1
),
):
temp_file.close()
node_with_no_error_message = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(u"""x = 'something dummy'""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
checker_test_object.checker.process_module(node_with_no_error_message)
with checker_test_object.assertNoMessages():
temp_file.close()
class DivisionOperatorCheckerTests(unittest.TestCase):
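"""Unit tests for DivisionOperatorChecker."""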
def setUp(self):
super(DivisionOperatorCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.DivisionOperatorChecker)
self.checker_test_object.setup_method()
def test_division_operator_with_spaces(self):
node_division_operator_with_spaces = astroid.extract_node(
u"""
a / b #@
""")
message = testutils.Message(
msg_id='division-operator-used',
node=node_division_operator_with_spaces)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_binop(
node_division_operator_with_spaces)
def test_division_operator_without_spaces(self):
node_division_operator_without_spaces = astroid.extract_node(
u"""
a/b #@
""")
message = testutils.Message(
msg_id='division-operator-used',
node=node_division_operator_without_spaces)
with self.checker_test_object.assertAddsMessages(message):
self.checker_test_object.checker.visit_binop(
node_division_operator_without_spaces)
class SingleLineCommentCheckerTests(unittest.TestCase):
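"""Unit tests for SingleLineCommentChecker."""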
def setUp(self):
super(SingleLineCommentCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.SingleLineCommentChecker)
self.checker_test_object.setup_method()
def test_invalid_punctuation(self):
node_invalid_punctuation = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# This is a multiline
# comment/
# Comment.
""")
node_invalid_punctuation.file = filename
node_invalid_punctuation.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_invalid_punctuation))
message = testutils.Message(
msg_id='invalid-punctuation-used',
line=2)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_no_space_at_beginning(self):
node_no_space_at_beginning = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""#Something.
""")
node_no_space_at_beginning.file = filename
node_no_space_at_beginning.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_no_space_at_beginning))
message = testutils.Message(
msg_id='no-space-at-beginning',
line=1)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_no_capital_letter_at_beginning(self):
node_no_capital_letter_at_beginning = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# coding: utf-8
# something.
""")
node_no_capital_letter_at_beginning.file = filename
node_no_capital_letter_at_beginning.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_no_capital_letter_at_beginning))
message = testutils.Message(
msg_id='no-capital-letter-at-beginning',
line=3)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_comment_with_excluded_phrase(self):
node_comment_with_excluded_phrase = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# coding: utf-8
# pylint: disable
a = 1 + 2 # pylint: disable
""")
node_comment_with_excluded_phrase.file = filename
node_comment_with_excluded_phrase.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_comment_with_excluded_phrase))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_variable_name_in_comment(self):
node_variable_name_in_comment = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# coding: utf-8
# variable_name is used.
""")
node_variable_name_in_comment.file = filename
node_variable_name_in_comment.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_variable_name_in_comment))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_comment_with_version_info(self):
node_comment_with_version_info = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# coding: utf-8
# v2 is used.
""")
node_comment_with_version_info.file = filename
node_comment_with_version_info.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_comment_with_version_info))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_data_type_in_comment(self):
node_data_type_in_comment = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# coding: utf-8
# str. variable is type of str.
""")
node_data_type_in_comment.file = filename
node_data_type_in_comment.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_data_type_in_comment))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_comment_inside_docstring(self):
node_comment_inside_docstring = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# coding: utf-8
\"\"\"# str. variable is type of str.\"\"\"
\"\"\"# str. variable is type
of str.\"\"\"
""")
node_comment_inside_docstring.file = filename
node_comment_inside_docstring.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_comment_inside_docstring))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_well_formed_comment(self):
node_with_no_error_message = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""# coding: utf-8
# Multi
# line
# comment.
""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_with_no_error_message))
with self.checker_test_object.assertNoMessages():
temp_file.close()
class BlankLineBelowFileOverviewCheckerTests(unittest.TestCase):
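"""Unit tests for BlankLineBelowFileOverviewChecker."""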
def setUp(self):
super(BlankLineBelowFileOverviewCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.BlankLineBelowFileOverviewChecker)
self.checker_test_object.setup_method()
def test_no_empty_line_below_fileoverview(self):
node_no_empty_line_below_fileoverview = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
\"\"\" this file does something \"\"\"
import something
import random
""")
node_no_empty_line_below_fileoverview.file = filename
node_no_empty_line_below_fileoverview.path = filename
node_no_empty_line_below_fileoverview.fromlineno = 2
self.checker_test_object.checker.visit_module(
node_no_empty_line_below_fileoverview)
message = testutils.Message(
msg_id='no-empty-line-provided-below-fileoverview',
node=node_no_empty_line_below_fileoverview)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_extra_empty_lines_below_fileoverview(self):
node_extra_empty_lines_below_fileoverview = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
\"\"\" this file does something \"\"\"
import something
from something import random
""")
node_extra_empty_lines_below_fileoverview.file = filename
node_extra_empty_lines_below_fileoverview.path = filename
node_extra_empty_lines_below_fileoverview.fromlineno = 2
self.checker_test_object.checker.visit_module(
node_extra_empty_lines_below_fileoverview)
message = testutils.Message(
msg_id='only-a-single-empty-line-should-be-provided',
node=node_extra_empty_lines_below_fileoverview)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_extra_empty_lines_below_fileoverview_with_unicode_characters(self):
node_extra_empty_lines_below_fileoverview = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
#this comment has a unicode character \u2713
\"\"\" this file does \u2715 something \"\"\"
from something import random
""")
node_extra_empty_lines_below_fileoverview.file = filename
node_extra_empty_lines_below_fileoverview.path = filename
node_extra_empty_lines_below_fileoverview.fromlineno = 3
self.checker_test_object.checker.visit_module(
node_extra_empty_lines_below_fileoverview)
message = testutils.Message(
msg_id='only-a-single-empty-line-should-be-provided',
node=node_extra_empty_lines_below_fileoverview)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_no_empty_line_below_fileoverview_with_unicode_characters(self):
node_no_empty_line_below_fileoverview = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
#this comment has a unicode character \u2713
\"\"\" this file does \u2715 something \"\"\"
import something
import random
""")
node_no_empty_line_below_fileoverview.file = filename
node_no_empty_line_below_fileoverview.path = filename
node_no_empty_line_below_fileoverview.fromlineno = 3
self.checker_test_object.checker.visit_module(
node_no_empty_line_below_fileoverview)
message = testutils.Message(
msg_id='no-empty-line-provided-below-fileoverview',
node=node_no_empty_line_below_fileoverview)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_single_new_line_below_file_overview(self):
node_with_no_error_message = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
\"\"\" this file does something \"\"\"
import something
import random
""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
node_with_no_error_message.fromlineno = 2
self.checker_test_object.checker.visit_module(
node_with_no_error_message)
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_file_with_no_file_overview(self):
node_file_with_no_file_overview = astroid.scoped_nodes.Module(
name='test',
doc=None)
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
import something
import random
""")
node_file_with_no_file_overview.file = filename
node_file_with_no_file_overview.path = filename
self.checker_test_object.checker.visit_module(
node_file_with_no_file_overview)
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_file_overview_at_end_of_file(self):
node_file_overview_at_end_of_file = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
\"\"\" this file does something \"\"\" """)
node_file_overview_at_end_of_file.file = filename
node_file_overview_at_end_of_file.path = filename
node_file_overview_at_end_of_file.fromlineno = 2
self.checker_test_object.checker.visit_module(
node_file_overview_at_end_of_file)
message = testutils.Message(
msg_id='only-a-single-empty-line-should-be-provided',
node=node_file_overview_at_end_of_file)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
class SingleLinePragmaCheckerTests(unittest.TestCase):
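"""Unit tests for SingleLinePragmaChecker."""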
def setUp(self):
super(SingleLinePragmaCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.SingleLinePragmaChecker)
self.checker_test_object.setup_method()
def test_pragma_for_multiline(self):
node_pragma_for_multiline = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
# pylint: disable=invalid-name
def funcName():
\"\"\" # pylint: disable=test-purpose\"\"\"
pass
# pylint: enable=invalid-name
""")
node_pragma_for_multiline.file = filename
node_pragma_for_multiline.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_pragma_for_multiline))
message1 = testutils.Message(
msg_id='single-line-pragma',
line=2)
message2 = testutils.Message(
msg_id='single-line-pragma',
line=6)
with self.checker_test_object.assertAddsMessages(
message1, message2):
temp_file.close()
def test_enable_single_line_pragma_for_multiline(self):
node_enable_single_line_pragma_for_multiline = (
astroid.scoped_nodes.Module(name='test', doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
# pylint: disable=single-line-pragma
def func():
\"\"\"
# pylint: disable=testing-purpose
\"\"\"
pass
# pylint: enable=single-line-pragma
""")
node_enable_single_line_pragma_for_multiline.file = filename
node_enable_single_line_pragma_for_multiline.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_enable_single_line_pragma_for_multiline))
message = testutils.Message(
msg_id='single-line-pragma',
line=2)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_enable_single_line_pragma_with_invalid_name(self):
node_enable_single_line_pragma_with_invalid_name = (
astroid.scoped_nodes.Module(name='test', doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
# pylint: disable=invalid-name, single-line-pragma
def funcName():
\"\"\"
# pylint: disable=testing-purpose
\"\"\"
pass
# pylint: enable=invalid_name, single-line-pragma
""")
node_enable_single_line_pragma_with_invalid_name.file = filename
node_enable_single_line_pragma_with_invalid_name.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(
node_enable_single_line_pragma_with_invalid_name))
message = testutils.Message(
msg_id='single-line-pragma',
line=2)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
def test_single_line_pylint_pragma(self):
node_with_no_error_message = (
astroid.scoped_nodes.Module(name='test', doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
def funcName(): # pylint: disable=single-line-pragma
pass
""")
node_with_no_error_message.file = filename
node_with_no_error_message.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_with_no_error_message))
with self.checker_test_object.assertNoMessages():
temp_file.close()
def test_no_and_extra_space_before_pylint(self):
node_no_and_extra_space_before_pylint = (
astroid.scoped_nodes.Module(name='test', doc='Custom test'))
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
# pylint:disable=single-line-pragma
def func():
\"\"\"
# pylint: disable=testing-purpose
\"\"\"
pass
# pylint: enable=single-line-pragma
""")
node_no_and_extra_space_before_pylint.file = filename
node_no_and_extra_space_before_pylint.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_no_and_extra_space_before_pylint))
message = testutils.Message(
msg_id='single-line-pragma',
line=2)
with self.checker_test_object.assertAddsMessages(message):
temp_file.close()
class SingleSpaceAfterKeyWordCheckerTests(unittest.TestCase):
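"""Unit tests for SingleSpaceAfterKeyWordChecker."""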
def setUp(self):
super(SingleSpaceAfterKeyWordCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.SingleSpaceAfterKeyWordChecker)
self.checker_test_object.setup_method()
def test_no_space_after_keyword(self):
node_no_space_after_keyword = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
if(False):
pass
elif(True):
pass
while(True):
pass
yield(1)
return True if(True) else False
""")
node_no_space_after_keyword.file = filename
node_no_space_after_keyword.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_no_space_after_keyword))
if_message = testutils.Message(
msg_id='single-space-after-keyword', args=('if'), line=2)
elif_message = testutils.Message(
msg_id='single-space-after-keyword', args=('elif'), line=4)
while_message = testutils.Message(
msg_id='single-space-after-keyword', args=('while'), line=6)
yield_message = testutils.Message(
msg_id='single-space-after-keyword', args=('yield'), line=8)
if_exp_message = testutils.Message(
msg_id='single-space-after-keyword', args=('if'), line=9)
with self.checker_test_object.assertAddsMessages(
if_message, elif_message, while_message, yield_message,
if_exp_message):
temp_file.close()
def test_multiple_spaces_after_keyword(self):
node_multiple_spaces_after_keyword = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
if  False:
pass
elif  True:
pass
while  True:
pass
yield  1
return True if  True else False
""")
node_multiple_spaces_after_keyword.file = filename
node_multiple_spaces_after_keyword.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_multiple_spaces_after_keyword))
if_message = testutils.Message(
msg_id='single-space-after-keyword', args=('if'), line=2)
elif_message = testutils.Message(
msg_id='single-space-after-keyword', args=('elif'), line=4)
while_message = testutils.Message(
msg_id='single-space-after-keyword', args=('while'), line=6)
yield_message = testutils.Message(
msg_id='single-space-after-keyword', args=('yield'), line=8)
if_exp_message = testutils.Message(
msg_id='single-space-after-keyword', args=('if'), line=9)
with self.checker_test_object.assertAddsMessages(
if_message, elif_message, while_message, yield_message,
if_exp_message):
temp_file.close()
def test_single_space_after_keyword(self):
node_single_space_after_keyword = astroid.scoped_nodes.Module(
name='test',
doc='Custom test')
temp_file = tempfile.NamedTemporaryFile()
filename = temp_file.name
with python_utils.open_file(filename, 'w') as tmp:
tmp.write(
u"""
if False:
pass
elif True:
pass
while True:
pass
yield 1
return True if True else False
""")
node_single_space_after_keyword.file = filename
node_single_space_after_keyword.path = filename
self.checker_test_object.checker.process_tokens(
utils.tokenize_module(node_single_space_after_keyword))
with self.checker_test_object.assertNoMessages():
temp_file.close()
class InequalityWithNoneCheckerTests(unittest.TestCase):
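"""Unit tests for InequalityWithNoneChecker."""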
def setUp(self):
super(InequalityWithNoneCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.InequalityWithNoneChecker)
self.checker_test_object.setup_method()
def test_inequality_op_on_none_adds_message(self):
if_node = astroid.extract_node(
"""
if x != None: #@
pass
"""
)
compare_node = if_node.test
not_equal_none_message = testutils.Message(
msg_id='inequality-with-none', node=compare_node)
with self.checker_test_object.assertAddsMessages(
not_equal_none_message
):
self.checker_test_object.checker.visit_compare(compare_node)
def test_inequality_op_on_none_with_wrapped_none_adds_message(self):
if_node = astroid.extract_node(
"""
if x != ( #@
None
):
pass
"""
)
compare_node = if_node.test
not_equal_none_message = testutils.Message(
msg_id='inequality-with-none', node=compare_node)
with self.checker_test_object.assertAddsMessages(
not_equal_none_message
):
self.checker_test_object.checker.visit_compare(compare_node)
def test_usage_of_is_not_on_none_does_not_add_message(self):
if_node = astroid.extract_node(
"""
if x is not None: #@
pass
"""
)
compare_node = if_node.test
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_compare(compare_node)
class DisallowedFunctionsCheckerTests(unittest.TestCase):
"""Unit tests for DisallowedFunctionsChecker"""
def setUp(self):
super(DisallowedFunctionsCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.DisallowedFunctionsChecker)
self.checker_test_object.setup_method()
def test_disallowed_removals_str(self):
(
self.checker_test_object
.checker.config.disallowed_functions_and_replacements_str) = [
b'example_func',
b'a.example_attr',
]
self.checker_test_object.checker.open()
call1, call2 = astroid.extract_node(
"""
example_func() #@
a.example_attr() #@
""")
message_remove_example_func = testutils.Message(
msg_id='remove-disallowed-function-calls',
node=call1,
args=b'example_func'
)
message_remove_example_attr = testutils.Message(
msg_id='remove-disallowed-function-calls',
node=call2,
args=b'a.example_attr'
)
with self.checker_test_object.assertAddsMessages(
message_remove_example_func,
message_remove_example_attr):
self.checker_test_object.checker.visit_call(call1)
self.checker_test_object.checker.visit_call(call2)
def test_disallowed_replacements_str(self):
(
self.checker_test_object
.checker.config.disallowed_functions_and_replacements_str) = [
b'datetime.datetime.now=>datetime.datetime.utcnow',
b'self.assertEquals=>self.assertEqual',
b'b.next=>python_utils.NEXT',
b'str=>python_utils.convert_to_bytes or python_utils.UNICODE',
]
self.checker_test_object.checker.open()
(
call1, call2, call3,
call4, call5
) = astroid.extract_node(
"""
datetime.datetime.now() #@
self.assertEquals() #@
str(1) #@
b.next() #@
b.a.next() #@
""")
message_replace_disallowed_datetime = testutils.Message(
msg_id='replace-disallowed-function-calls',
node=call1,
args=(
b'datetime.datetime.now',
b'datetime.datetime.utcnow')
)
message_replace_disallowed_assert_equals = testutils.Message(
msg_id='replace-disallowed-function-calls',
node=call2,
args=(
b'self.assertEquals',
b'self.assertEqual')
)
message_replace_disallowed_str = testutils.Message(
msg_id='replace-disallowed-function-calls',
node=call3,
args=(
b'str',
b'python_utils.convert_to_bytes or python_utils.UNICODE')
)
message_replace_disallowed_next = testutils.Message(
msg_id='replace-disallowed-function-calls',
node=call4,
args=(
b'b.next',
b'python_utils.NEXT')
)
with self.checker_test_object.assertAddsMessages(
message_replace_disallowed_datetime,
message_replace_disallowed_assert_equals,
message_replace_disallowed_str,
message_replace_disallowed_next):
self.checker_test_object.checker.visit_call(call1)
self.checker_test_object.checker.visit_call(call2)
self.checker_test_object.checker.visit_call(call3)
self.checker_test_object.checker.visit_call(call4)
self.checker_test_object.checker.visit_call(call5)
def test_disallowed_removals_regex(self):
(
self.checker_test_object
.checker.config.disallowed_functions_and_replacements_regex) = [
r'.*example_func',
r'.*\..*example_attr'
]
self.checker_test_object.checker.open()
call1, call2 = astroid.extract_node(
"""
somethingexample_func() #@
c.someexample_attr() #@
""")
message_remove_example_func = testutils.Message(
msg_id='remove-disallowed-function-calls',
node=call1,
args=b'somethingexample_func'
)
message_remove_example_attr = testutils.Message(
msg_id='remove-disallowed-function-calls',
node=call2,
args=b'c.someexample_attr'
)
with self.checker_test_object.assertAddsMessages(
message_remove_example_func,
message_remove_example_attr):
self.checker_test_object.checker.visit_call(call1)
self.checker_test_object.checker.visit_call(call2)
def test_disallowed_replacements_regex(self):
(
self.checker_test_object
.checker.config.disallowed_functions_and_replacements_regex) = [
r'.*example_func=>other_func',
r'.*\.example_attr=>other_attr',
]
self.checker_test_object.checker.open()
call1, call2, call3, call4 = astroid.extract_node(
"""
somethingexample_func() #@
d.example_attr() #@
d.example_attr() #@
d.b.example_attr() #@
""")
message_replace_example_func = testutils.Message(
msg_id='replace-disallowed-function-calls',
node=call1,
args=(b'somethingexample_func', b'other_func')
)
message_replace_example_attr1 = testutils.Message(
msg_id='replace-disallowed-function-calls',
node=call2,
args=(b'd.example_attr', b'other_attr')
)
message_replace_example_attr2 = testutils.Message(
msg_id='replace-disallowed-function-calls',
node=call3,
args=(b'd.example_attr', b'other_attr')
)
message_replace_example_attr3 = testutils.Message(
msg_id='replace-disallowed-function-calls',
node=call4,
args=(b'd.b.example_attr', b'other_attr')
)
with self.checker_test_object.assertAddsMessages(
message_replace_example_func,
message_replace_example_attr1,
message_replace_example_attr2,
message_replace_example_attr3):
self.checker_test_object.checker.visit_call(call1)
self.checker_test_object.checker.visit_call(call2)
self.checker_test_object.checker.visit_call(call3)
self.checker_test_object.checker.visit_call(call4)
class NonTestFilesFunctionNameCheckerTests(unittest.TestCase):
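"""Unit tests for NonTestFilesFunctionNameChecker."""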
def setUp(self):
super(NonTestFilesFunctionNameCheckerTests, self).setUp()
self.checker_test_object = testutils.CheckerTestCase()
self.checker_test_object.CHECKER_CLASS = (
pylint_extensions.NonTestFilesFunctionNameChecker)
self.checker_test_object.setup_method()
def test_function_def_for_test_file_with_test_only_adds_no_msg(self):
def_node = astroid.extract_node(
"""
def test_only_some_random_function(param1, param2):
pass
"""
)
def_node.root().name = 'random_module_test'
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(def_node)
def test_function_def_for_test_file_without_test_only_adds_no_msg(self):
def_node = astroid.extract_node(
"""
def some_random_function(param1, param2):
pass
"""
)
def_node.root().name = 'random_module_test'
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(def_node)
def test_function_def_for_non_test_file_with_test_only_adds_msg(self):
def_node = astroid.extract_node(
"""
def test_only_some_random_function(param1, param2):
pass
"""
)
def_node.root().name = 'random_module_nontest'
non_test_function_name_message = testutils.Message(
msg_id='non-test-files-function-name-checker', node=def_node)
with self.checker_test_object.assertAddsMessages(
non_test_function_name_message
):
self.checker_test_object.checker.visit_functiondef(def_node)
def test_function_def_for_non_test_file_without_test_only_adds_no_msg(self):
def_node = astroid.extract_node(
"""
def some_random_function(param1, param2):
pass
"""
)
def_node.root().name = 'random_module_nontest'
with self.checker_test_object.assertNoMessages():
self.checker_test_object.checker.visit_functiondef(def_node)
class DisallowDunderMetaclassCheckerTests(unittest.TestCase):
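"""Unit tests for DisallowDunderMetaclassChecker."""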
def test_wrong_metaclass_usage_raises_error(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
pylint_extensions.DisallowDunderMetaclassChecker)
checker_test_object.setup_method()
metaclass_node = astroid.extract_node(
"""
class FakeClass(python_utils.OBJECT):
def __init__(self, fake_arg):
self.fake_arg = fake_arg
def fake_method(self, name):
yield (name, name)
class MyObject: #@
__metaclass__ = FakeClass
def __init__(self, fake_arg):
self.fake_arg = fake_arg
""")
with checker_test_object.assertAddsMessages(
testutils.Message(
msg_id='no-dunder-metaclass',
node=metaclass_node
)
):
checker_test_object.checker.visit_classdef(metaclass_node)
def test_no_metaclass_usage_raises_no_error(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
pylint_extensions.DisallowDunderMetaclassChecker)
checker_test_object.setup_method()
metaclass_node = astroid.extract_node(
"""
class MyObject: #@
def __init__(self, fake_arg):
self.fake_arg = fake_arg
""")
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_classdef(metaclass_node)
def test_correct_metaclass_usage_raises_no_error(self):
checker_test_object = testutils.CheckerTestCase()
checker_test_object.CHECKER_CLASS = (
pylint_extensions.DisallowDunderMetaclassChecker)
checker_test_object.setup_method()
metaclass_node = astroid.extract_node(
"""
class FakeClass(python_utils.OBJECT):
def __init__(self, fake_arg):
self.fake_arg = fake_arg
def fake_method(self, name):
yield (name, name)
class MyObject: #@
python_utils.with_metaclass(FakeClass)
def __init__(self, fake_arg):
self.fake_arg = fake_arg
""")
with checker_test_object.assertNoMessages():
checker_test_object.checker.visit_classdef(metaclass_node)
| 36.143584
| 84
| 0.590724
|
794e99520d8e1818d9ddc1f6af065c43ffada72e
| 1,302
|
py
|
Python
|
configs/configuration_customroberta.py
|
haodingkui/semeval2020-task5-subtask1
|
bfd0c808c6b1de910d6f58ea040a13442b4bcdca
|
[
"MIT"
] | 2
|
2020-08-19T12:32:21.000Z
|
2021-11-08T15:50:08.000Z
|
configs/configuration_customroberta.py
|
haodingkui/semeval2020-task5-subtask1
|
bfd0c808c6b1de910d6f58ea040a13442b4bcdca
|
[
"MIT"
] | null | null | null |
configs/configuration_customroberta.py
|
haodingkui/semeval2020-task5-subtask1
|
bfd0c808c6b1de910d6f58ea040a13442b4bcdca
|
[
"MIT"
] | 1
|
2020-08-19T12:32:48.000Z
|
2020-08-19T12:32:48.000Z
|
from .configuration_custombert import CustomBertConfig
class CustomRobertaConfig(CustomBertConfig):
model_type = "roberta"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.2,
high_hidden_dropout_prob=0.5,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
**kwargs
):
super().__init__(**kwargs)
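        # Note (added): only the extra **kwargs are forwarded to CustomBertConfig;
        # the named hyper-parameters below are stored directly on this config.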
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.high_hidden_dropout_prob = high_hidden_dropout_prob
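# --- Hedged usage sketch added by the editor; not part of the original file ---
# Assuming CustomBertConfig requires no positional arguments, the config can be
# built and inspected like any HuggingFace-style config object:
#
#   config = CustomRobertaConfig(hidden_dropout_prob=0.3)
#   assert config.model_type == "roberta"
#   assert config.high_hidden_dropout_prob == 0.5  # class default retained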
| 33.384615
| 72
| 0.697389
|
794e999f6c7cb1a802a5d1be3239924b6186bea7
| 11,465
|
py
|
Python
|
reinhard/components/basic.py
|
FasterSpeeding/Reinhard
|
c665598f2079576f9cc97b7987f463b7963950d9
|
[
"BSD-3-Clause"
] | 10
|
2020-11-24T19:08:40.000Z
|
2022-03-03T07:17:41.000Z
|
reinhard/components/basic.py
|
FasterSpeeding/Reinhard
|
c665598f2079576f9cc97b7987f463b7963950d9
|
[
"BSD-3-Clause"
] | 34
|
2021-05-07T01:36:14.000Z
|
2022-03-31T08:15:14.000Z
|
reinhard/components/basic.py
|
FasterSpeeding/Reinhard
|
c665598f2079576f9cc97b7987f463b7963950d9
|
[
"BSD-3-Clause"
] | 1
|
2021-03-07T12:12:21.000Z
|
2021-03-07T12:12:21.000Z
|
# -*- coding: utf-8 -*-
# cython: language_level=3
# BSD 3-Clause License
#
# Copyright (c) 2020-2021, Faster Speeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import annotations
__all__: list[str] = ["basic_component", "load_basic", "unload_basic"]
import collections.abc as collections
import datetime
import itertools
import math
import platform
import time
import hikari
import psutil
import tanjun
import yuyo
from hikari import snowflakes
from .. import utility
def gen_help_embeds(
ctx: tanjun.abc.Context = tanjun.inject(type=tanjun.abc.Context),
client: tanjun.abc.Client = tanjun.inject(type=tanjun.abc.Client),
) -> dict[str, list[hikari.Embed]]:
prefix = next(iter(client.prefixes)) if client and client.prefixes else ""
help_embeds: dict[str, list[hikari.Embed]] = {}
for component in ctx.client.components:
if value := utility.generate_help_embeds(component, prefix=prefix):
help_embeds[value[0].lower()] = [v for v in value[1]]
return help_embeds
basic_component = tanjun.Component(name="basic", strict=True)
@basic_component.with_slash_command
@tanjun.as_slash_command("about", "Get basic information about the current bot instance.")
async def about_command(
ctx: tanjun.abc.Context,
process: psutil.Process = tanjun.cached_inject(psutil.Process),
) -> None:
"""Get basic information about the current bot instance."""
start_date = datetime.datetime.fromtimestamp(process.create_time())
uptime = datetime.datetime.now() - start_date
memory_usage: float = process.memory_full_info().uss / 1024 ** 2
cpu_usage: float = process.cpu_percent() / psutil.cpu_count()
memory_percent: float = process.memory_percent()
if ctx.shards:
shard_id = snowflakes.calculate_shard_id(ctx.shards.shard_count, ctx.guild_id) if ctx.guild_id else 0
name = f"Reinhard: Shard {shard_id} of {ctx.shards.shard_count}"
else:
name = "Reinhard: REST Server"
description = (
"An experimental pythonic Hikari bot.\n "
"The source can be found on [Github](https://github.com/FasterSpeeding/Reinhard)."
)
embed = (
hikari.Embed(description=description, colour=utility.embed_colour())
.set_author(name=name, url=hikari.__url__)
.add_field(name="Uptime", value=str(uptime), inline=True)
.add_field(
name="Process",
value=f"{memory_usage:.2f} MB ({memory_percent:.0f}%)\n{cpu_usage:.2f}% CPU",
inline=True,
)
.set_footer(
icon="http://i.imgur.com/5BFecvA.png",
text=f"Made with Hikari v{hikari.__version__} (python {platform.python_version()})",
)
)
error_manager = utility.HikariErrorManager(break_on=(hikari.NotFoundError, hikari.ForbiddenError))
await error_manager.try_respond(ctx, embed=embed)
@basic_component.with_message_command
@tanjun.as_message_command("help")
async def help_command(ctx: tanjun.abc.Context) -> None:
await ctx.respond("See the slash command menu")
# @basic_component.with_message_command
# @tanjun.with_greedy_argument("command_name", default=None)
# @tanjun.with_option("component_name", "--component", default=None)
# @tanjun.with_parser
# # TODO: specify a group or command
# @tanjun.as_message_command("help")
async def old_help_command(
ctx: tanjun.abc.Context,
command_name: str | None,
component_name: str | None,
component_client: yuyo.ComponentClient = tanjun.inject(type=yuyo.ComponentClient),
help_embeds: dict[str, list[hikari.Embed]] = tanjun.cached_inject(gen_help_embeds),
) -> None:
"""Get information about the commands in this bot.
Arguments
* command name: Optional greedy argument of a name to get a command's documentation by.
Options
* component name (--component): Name of a component to get the documentation for.
"""
if command_name is not None:
for own_prefix in ctx.client.prefixes:
if command_name.startswith(own_prefix):
command_name = command_name[len(own_prefix) :]
break
prefix = next(iter(ctx.client.prefixes)) if ctx.client.prefixes else ""
for _, command in ctx.client.check_message_name(command_name):
if command_embed := utility.generate_command_embed(command, prefix=prefix):
await ctx.respond(embed=command_embed)
break
else:
await ctx.respond(f"Couldn't find `{command_name}` command.")
return
if component_name:
if component_name.lower() not in help_embeds:
raise tanjun.CommandError(f"Couldn't find component `{component_name}`")
embed_generator = ((hikari.UNDEFINED, embed) for embed in help_embeds[component_name.lower()])
else:
embed_generator = (
(hikari.UNDEFINED, embed) for embed in itertools.chain.from_iterable(list(help_embeds.values()))
)
paginator = yuyo.ComponentPaginator(embed_generator, authors=(ctx.author,))
if first_entry := await paginator.get_next_entry():
content, embed = first_entry
message = await ctx.respond(content=content, embed=embed, component=paginator, ensure_result=True)
component_client.set_executor(message, paginator)
@basic_component.with_slash_command
@tanjun.as_slash_command("ping", "Get the bot's current delay.")
async def ping_command(ctx: tanjun.abc.Context, /) -> None:
"""Get the bot's current delay."""
start_time = time.perf_counter()
await ctx.rest.fetch_my_user()
time_taken = (time.perf_counter() - start_time) * 1_000
heartbeat_latency = ctx.shards.heartbeat_latency * 1_000 if ctx.shards else float("NAN")
await ctx.respond(f"PONG\n - REST: {time_taken:.0f}ms\n - Gateway: {heartbeat_latency:.0f}ms")
_about_lines: list[tuple[str, collections.Callable[[hikari.api.Cache], int]]] = [
("Guild channels: {0}", lambda c: len(c.get_guild_channels_view())),
("Emojis: {0}", lambda c: len(c.get_emojis_view())),
("Available Guilds: {0}", lambda c: len(c.get_available_guilds_view())),
("Unavailable Guilds: {0}", lambda c: len(c.get_unavailable_guilds_view())),
("Invites: {0}", lambda c: len(c.get_invites_view())),
("Members: {0}", lambda c: sum(len(record) for record in c.get_members_view().values())),
("Messages: {0}", lambda c: len(c.get_messages_view())),
("Presences: {0}", lambda c: sum(len(record) for record in c.get_presences_view().values())),
("Roles: {0}", lambda c: len(c.get_roles_view())),
("Users: {0}", lambda c: len(c.get_users_view())),
("Voice states: {0}", lambda c: sum(len(record) for record in c.get_voice_states_view().values())),
]
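# Note (added): each entry above pairs a display template with a cache getter;
# `cache_command` fills the template with the getter's count and also times
# each lookup so per-resource cache latency can be reported.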
@basic_component.with_slash_command
@tanjun.as_slash_command("cache", "Get general information about this bot's cache.")
async def cache_command(
ctx: tanjun.abc.Context,
process: psutil.Process = tanjun.cached_inject(psutil.Process),
cache: hikari.api.Cache = tanjun.inject(type=hikari.api.Cache),
me: hikari.OwnUser = tanjun.inject_lc(hikari.OwnUser),
) -> None:
"""Get general information about this bot."""
start_date = datetime.datetime.fromtimestamp(process.create_time())
uptime = datetime.datetime.now() - start_date
memory_usage: float = process.memory_full_info().uss / 1024 ** 2
cpu_usage: float = process.cpu_percent() / psutil.cpu_count()
memory_percent: float = process.memory_percent()
cache_stats_lines: list[tuple[str, float]] = []
storage_start_time = time.perf_counter()
for line_template, callback in _about_lines:
line_start_time = time.perf_counter()
line = line_template.format(callback(cache))
cache_stats_lines.append((line, (time.perf_counter() - line_start_time) * 1_000))
storage_time_taken = time.perf_counter() - storage_start_time
    # floor(log10(max)) + 1 gives the integer digit count; the extra 5 covers the decimal point and 4 decimal places
left_pad = math.floor(math.log(max(num for _, num in cache_stats_lines), 10)) + 6
largest_line = max(len(line) for line, _ in cache_stats_lines)
cache_stats = "\n".join(
line + " " * (largest_line + 2 - len(line)) + "{0:0{left_pad}.4f} ms".format(time_taken, left_pad=left_pad)
for line, time_taken in cache_stats_lines
)
embed = (
hikari.Embed(description="An experimental pythonic Hikari bot.", color=0x55CDFC)
.set_author(name="Hikari: testing client", icon=me.avatar_url or me.default_avatar_url, url=hikari.__url__)
.add_field(name="Uptime", value=str(uptime), inline=True)
.add_field(
name="Process",
value=f"{memory_usage:.2f} MB ({memory_percent:.0f}%)\n{cpu_usage:.2f}% CPU",
inline=True,
)
.add_field(name="Standard cache stats", value=f"```{cache_stats}```")
.set_footer(
icon="http://i.imgur.com/5BFecvA.png",
text=f"Made with Hikari v{hikari.__version__} (python {platform.python_version()})",
)
)
error_manager = utility.HikariErrorManager(break_on=(hikari.NotFoundError, hikari.ForbiddenError))
await error_manager.try_respond(ctx, content=f"{storage_time_taken * 1_000:.4g} ms", embed=embed)
@cache_command.with_check
def _(ctx: tanjun.abc.Context) -> bool:
if ctx.cache:
return True
raise tanjun.CommandError("Client is cache-less")
@basic_component.with_slash_command
@tanjun.as_slash_command("invite", "Invite the bot to your server(s)")
async def invite_command(ctx: tanjun.abc.Context, me: hikari.OwnUser = tanjun.inject_lc(hikari.OwnUser)) -> None:
await ctx.respond(
f"https://discord.com/oauth2/authorize?client_id={me.id}&scope=bot%20applications.commands&permissions=8"
)
@tanjun.as_loader
def load_basic(cli: tanjun.Client, /) -> None:
cli.add_component(basic_component.copy())
@tanjun.as_unloader
def unload_basic(cli: tanjun.Client, /) -> None:
cli.remove_component_by_name(basic_component.name)
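# --- Hedged usage sketch added by the editor; not part of the original file ---
# `load_basic` is decorated with @tanjun.as_loader, so the component is normally
# pulled in via module loading rather than called directly (bot construction
# elided; the module path is an assumption based on the repo layout):
#
#   client = tanjun.Client.from_gateway_bot(bot)
#   client.load_modules("reinhard.components.basic")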
| 41.389892
| 115
| 0.704143
|
794e9a7baff952bc6c3caf54622466869b0ffc57
| 1,553
|
py
|
Python
|
neutron/plugins/common/constants.py
|
kevinbenton/neutron
|
f27fba3ad77d907713e3e1cbfa45d33e0135c08b
|
[
"Apache-2.0"
] | null | null | null |
neutron/plugins/common/constants.py
|
kevinbenton/neutron
|
f27fba3ad77d907713e3e1cbfa45d33e0135c08b
|
[
"Apache-2.0"
] | null | null | null |
neutron/plugins/common/constants.py
|
kevinbenton/neutron
|
f27fba3ad77d907713e3e1cbfa45d33e0135c08b
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# service type constants:
CORE = "CORE"
DUMMY = "DUMMY"
LOADBALANCER = "LOADBALANCER"
FIREWALL = "FIREWALL"
VPN = "VPN"
# maps extension alias to service type
EXT_TO_SERVICE_MAPPING = {
'dummy': DUMMY,
'lbaas': LOADBALANCER,
'fwaas': FIREWALL,
'vpnaas': VPN,
}
# TODO(salvatore-orlando): Move these (or derive them) from conf file
ALLOWED_SERVICES = [CORE, DUMMY, LOADBALANCER, FIREWALL, VPN]
COMMON_PREFIXES = {
CORE: "",
DUMMY: "/dummy_svc",
LOADBALANCER: "/lb",
FIREWALL: "/fw",
VPN: "/vpn",
}
# Service operation status constants
ACTIVE = "ACTIVE"
PENDING_CREATE = "PENDING_CREATE"
PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
INACTIVE = "INACTIVE"
ERROR = "ERROR"
# FWaaS firewall rule action
FWAAS_ALLOW = "allow"
FWAAS_DENY = "deny"
# L3 Protocol name constants
TCP = "tcp"
UDP = "udp"
ICMP = "icmp"
| 25.883333
| 78
| 0.707019
|
794e9aa0a3aef6ea9f5f03b676262650592a285e
| 50,287
|
py
|
Python
|
jina/flow/base.py
|
ezhaohongwei/jina
|
9769f2e35eb8a196304a145409f959a7beac0432
|
[
"Apache-2.0"
] | 1
|
2021-06-14T00:35:14.000Z
|
2021-06-14T00:35:14.000Z
|
jina/flow/base.py
|
ezhaohongwei/jina
|
9769f2e35eb8a196304a145409f959a7beac0432
|
[
"Apache-2.0"
] | null | null | null |
jina/flow/base.py
|
ezhaohongwei/jina
|
9769f2e35eb8a196304a145409f959a7beac0432
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import base64
import copy
import os
import re
import threading
import uuid
import warnings
from collections import OrderedDict, defaultdict
from contextlib import ExitStack
from typing import TYPE_CHECKING, Optional, Union, Tuple, List, Set, Dict, overload, Type
from .builder import build_required, _build_flow, _hanging_pods
from .. import __default_host__
from ..clients import Client
from ..clients.mixin import AsyncPostMixin, PostMixin
from ..enums import FlowBuildLevel, PodRoleType, FlowInspectType
from ..excepts import FlowTopologyError, FlowMissingPodError
from ..helper import (
colored,
get_public_ip,
get_internal_ip,
typename,
ArgNamespace,
download_mermaid_url,
)
from ..jaml import JAMLCompatible
from ..logging.logger import JinaLogger
from ..parsers import set_gateway_parser, set_pod_parser
from ..peapods import Pod
from ..peapods.pods.compound import CompoundPod
from ..peapods.pods.factory import PodFactory
__all__ = ['Flow']
class FlowType(type(ExitStack), type(JAMLCompatible)):
"""Type of Flow, metaclass of :class:`BaseFlow`"""
pass
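# Note (added): the pattern below captures a trailing `host:port` pair; the port
# alternations together cover the full 0-65535 range.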
_regex_port = r'(.*?):([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$'
if TYPE_CHECKING:
    # resolved only by static type checkers; never imported at runtime
from ..peapods import BasePod
from ..executors import BaseExecutor
from ..clients.base import BaseClient
class Flow(PostMixin, JAMLCompatible, ExitStack, metaclass=FlowType):
"""Flow is how Jina streamlines and distributes Executors. """
# overload_inject_start_flow
@overload
def __init__(
self,
asyncio: Optional[bool] = False,
continue_on_error: Optional[bool] = False,
description: Optional[str] = None,
env: Optional[dict] = None,
host: Optional[str] = '0.0.0.0',
inspect: Optional[str] = 'COLLECT',
log_config: Optional[str] = None,
name: Optional[str] = None,
port_expose: Optional[int] = None,
proxy: Optional[bool] = False,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
request_size: Optional[int] = 100,
restful: Optional[bool] = False,
return_results: Optional[bool] = False,
show_progress: Optional[bool] = False,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
workspace: Optional[str] = './',
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
        :param continue_on_error: If set, a Request that causes an error will only be logged, without blocking further requests.
        :param description: The description of this object. It will be used in the automatic docs UI.
:param env: The map of environment variables that are available inside runtime
:param host: The host address of the runtime, by default it is 0.0.0.0.
        :param inspect: The strategy applied to inspect pods in the flow.
If `REMOVE` is given then all inspect pods are removed when building the flow.
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
            - automatic docs UI
- ...
When not given, then the default naming strategy will apply.
:param port_expose: The port of the host exposed to the public
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param request_size: The number of Documents in each Request.
:param restful: If set, use RESTful interface instead of gRPC as the main interface. This expects the corresponding Flow to be set with --restful as well.
:param return_results: This feature is only used for AsyncClient.
If set, the results of all Requests will be returned as a list. This is useful when one wants
            to process Responses in bulk instead of using callbacks.
:param show_progress: If set, client will show a progress bar on receiving every request.
:param uses: The YAML file represents a flow
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_flow
def __init__(
self,
args: Optional['argparse.Namespace'] = None,
**kwargs,
):
super().__init__()
        self._version = '1'  #: YAML version number; this will be overridden later if the YAML config says otherwise
self._pod_nodes = OrderedDict() # type: Dict[str, BasePod]
self._inspect_pods = {} # type: Dict[str, str]
self._build_level = FlowBuildLevel.EMPTY
self._last_changed_pod = [
'gateway'
] #: default first pod is gateway, will add when build()
self._update_args(args, **kwargs)
if isinstance(self.args, argparse.Namespace):
self.logger = JinaLogger(
self.__class__.__name__, **vars(self.args), **self._common_kwargs
)
else:
self.logger = JinaLogger(self.__class__.__name__, **self._common_kwargs)
def _update_args(self, args, **kwargs):
from ..parsers.flow import set_flow_parser
from ..helper import ArgNamespace
_flow_parser = set_flow_parser()
if args is None:
args = ArgNamespace.kwargs2namespace(kwargs, _flow_parser)
self.args = args
# common args should be the ones that can not be parsed by _flow_parser
known_keys = vars(args)
self._common_kwargs = {k: v for k, v in kwargs.items() if k not in known_keys}
self._kwargs = ArgNamespace.get_non_defaults_args(
args, _flow_parser
) #: for yaml dump
base_cls = self.__class__
base_cls_name = self.__class__.__name__
if self.args.asyncio and not isinstance(self, AsyncPostMixin):
self.__class__ = type(base_cls_name, (AsyncPostMixin, base_cls), {})
@staticmethod
def _parse_endpoints(op_flow, pod_name, endpoint, connect_to_last_pod=False) -> Set:
# parsing needs
if isinstance(endpoint, str):
endpoint = [endpoint]
elif not endpoint:
if op_flow._last_changed_pod and connect_to_last_pod:
endpoint = [op_flow.last_pod]
else:
endpoint = []
if isinstance(endpoint, (list, tuple)):
for idx, s in enumerate(endpoint):
if s == pod_name:
raise FlowTopologyError(
                    'the input/output of a pod can not be itself'
)
else:
raise ValueError(f'endpoint={endpoint} is not parsable')
# if an endpoint is being inspected, then replace it with inspected Pod
endpoint = set(op_flow._inspect_pods.get(ep, ep) for ep in endpoint)
return endpoint
@property
def last_pod(self):
"""Last pod
.. # noqa: DAR401
.. # noqa: DAR201
"""
return self._last_changed_pod[-1]
@last_pod.setter
def last_pod(self, name: str):
"""
Set a Pod as the last Pod in the Flow, useful when modifying the Flow.
.. # noqa: DAR401
:param name: the name of the existing Pod
"""
if name not in self._pod_nodes:
raise FlowMissingPodError(f'{name} can not be found in this Flow')
if self._last_changed_pod and name == self.last_pod:
pass
else:
self._last_changed_pod.append(name)
# graph is now changed so we need to
# reset the build level to the lowest
self._build_level = FlowBuildLevel.EMPTY
def _add_gateway(self, needs, **kwargs):
pod_name = 'gateway'
kwargs.update(
dict(
name=pod_name,
ctrl_with_ipc=True, # otherwise ctrl port would be conflicted
runtime_cls='RESTRuntime' if self.args.restful else 'GRPCRuntime',
pod_role=PodRoleType.GATEWAY,
)
)
kwargs.update(vars(self.args))
kwargs.update(self._common_kwargs)
args = ArgNamespace.kwargs2namespace(kwargs, set_gateway_parser())
self._pod_nodes[pod_name] = Pod(args, needs)
def needs(
self, needs: Union[Tuple[str], List[str]], name: str = 'joiner', *args, **kwargs
) -> 'Flow':
"""
        Add a blocker to the Flow; wait until all Peas defined in **needs** have completed.
.. # noqa: DAR401
:param needs: list of service names to wait
:param name: the name of this joiner, by default is ``joiner``
:param args: additional positional arguments forwarded to the add function
:param kwargs: additional key value arguments forwarded to the add function
:return: the modified Flow
"""
if len(needs) <= 1:
raise FlowTopologyError(
'no need to wait for a single service, need len(needs) > 1'
)
return self.add(
name=name, needs=needs, pod_role=PodRoleType.JOIN, *args, **kwargs
)
def needs_all(self, name: str = 'joiner', *args, **kwargs) -> 'Flow':
"""
        Collect all hanging Pods so far and add a blocker to the Flow; wait until all hanging Peas have completed.
:param name: the name of this joiner (default is ``joiner``)
:param args: additional positional arguments which are forwarded to the add and needs function
:param kwargs: additional key value arguments which are forwarded to the add and needs function
:return: the modified Flow
"""
needs = _hanging_pods(self)
if len(needs) == 1:
return self.add(name=name, needs=needs, *args, **kwargs)
return self.needs(name=name, needs=needs, *args, **kwargs)
# overload_inject_start_pod
@overload
def add(
self,
ctrl_with_ipc: Optional[bool] = False,
daemon: Optional[bool] = False,
description: Optional[str] = None,
docker_kwargs: Optional[dict] = None,
entrypoint: Optional[str] = None,
env: Optional[dict] = None,
expose_public: Optional[bool] = False,
external: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
host_out: Optional[str] = '0.0.0.0',
log_config: Optional[str] = None,
memory_hwm: Optional[int] = -1,
name: Optional[str] = None,
on_error_strategy: Optional[str] = 'IGNORE',
parallel: Optional[int] = 1,
peas_hosts: Optional[List[str]] = None,
polling: Optional[str] = 'ANY',
port_ctrl: Optional[int] = None,
port_expose: Optional[int] = None,
port_in: Optional[int] = None,
port_out: Optional[int] = None,
proxy: Optional[bool] = False,
pull_latest: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
quiet_remote_logs: Optional[bool] = False,
replicas: Optional[int] = 1,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'ZEDRuntime',
scheduling: Optional[str] = 'LOAD_BALANCE',
socket_in: Optional[str] = 'PULL_BIND',
socket_out: Optional[str] = 'PUSH_BIND',
ssh_keyfile: Optional[str] = None,
ssh_password: Optional[str] = None,
ssh_server: Optional[str] = None,
timeout_ctrl: Optional[int] = 5000,
timeout_ready: Optional[int] = 600000,
upload_files: Optional[List[str]] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_before: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_internal: Optional[
Union[str, Type['BaseExecutor'], dict]
] = 'BaseExecutor',
volumes: Optional[List[str]] = None,
workspace: Optional[str] = None,
workspace_id: Optional[str] = None,
**kwargs,
) -> 'Flow':
"""Add an Executor to the current Flow object.
:param ctrl_with_ipc: If set, use ipc protocol for control socket
:param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on existing. setting it to true basically tell the Pea do not wait on the Runtime when closing
        :param description: The description of this object. It will be used in the automatic docs UI.
:param docker_kwargs: Dictionary of kwargs arguments that will be passed to Docker SDK when starting the docker '
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
        :param entrypoint: The entrypoint command overrides the ENTRYPOINT in the Docker image; when not set, the Docker image's ENTRYPOINT takes effect.
:param env: The map of environment variables that are available inside runtime
        :param expose_public: If set, expose the public IP address to remote when necessary; by default it exposes the private IP address, which only allows access within the same network/subnet. Important to set this to true when the Pea will receive input connections from remote Peas
:param external: The Pod will be considered an external Pod that has been started independently from the Flow. This Pod will not be context managed by the Flow, and is considered with `--freeze-network-settings`
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for input, by default it is 0.0.0.0
:param host_out: The host address for output, by default it is 0.0.0.0
:param log_config: The YAML config of the logger used in this object.
:param memory_hwm: The memory high watermark of this pod in Gigabytes, pod will restart when this is reached. -1 means no restriction
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
            - automatic docs UI
- ...
When not given, then the default naming strategy will apply.
:param on_error_strategy: The skip strategy on exceptions.
- IGNORE: Ignore it, keep running all Executors in the sequel flow
- SKIP_HANDLE: Skip all Executors in the sequel, only `pre_hook` and `post_hook` are called
- THROW_EARLY: Immediately throw the exception, the sequel flow will not be running at all
            Note, `IGNORE`, `SKIP_EXECUTOR` and `SKIP_HANDLE` do not guarantee successful execution of the sequel flow. If something
            is wrong upstream, it is hard to carry this exception forward without any side-effect.
:param parallel: The number of parallel peas in the pod running at the same time, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param peas_hosts: The hosts of the peas when parallel greater than 1.
Peas will be evenly distributed among the hosts. By default,
peas are running on host provided by the argument ``host``
:param polling: The polling strategy of the Pod (when `parallel>1`)
- ANY: only one (whoever is idle) Pea polls the message
- ALL: all Peas poll the message (like a broadcast)
:param port_ctrl: The port for controlling the runtime, default a random port between [49152, 65535]
:param port_expose: The port of the host exposed to the public
:param port_in: The port for input data, default a random port between [49152, 65535]
:param port_out: The port for output data, default a random port between [49152, 65535]
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param pull_latest: Pull the latest image before running
:param py_modules: The customized python modules need to be imported before loading the executor
Note, when importing multiple files and there is a dependency between them, then one has to write the dependencies in
reverse order. That is, if `__init__.py` depends on `A.py`, which again depends on `B.py`, then you need to write:
--py-modules __init__.py --py-modules B.py --py-modules A.py
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param quiet_remote_logs: Do not display the streaming of remote logs on local console
:param replicas: The number of replicas in the pod, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param scheduling: The strategy of scheduling workload among Peas
:param socket_in: The socket type for input port
:param socket_out: The socket type for output port
:param ssh_keyfile: This specifies a key to be used in ssh login, default None. regular default ssh keys will be used without specifying this argument.
:param ssh_password: The ssh password to the ssh server.
:param ssh_server: The SSH server through which the tunnel will be created, can actually be a fully specified `user@server:port` ssh url.
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param upload_files: The files on the host to be uploaded to the remote
workspace. This can be useful when your Pod has more
file dependencies beyond a single YAML file, e.g.
Python files, data files.
Note,
            - currently only a flat structure is supported, which means if you upload `[./foo/a.py, ./foo/b.py, ./bar/c.yml]`, then they will be put under the _same_ workspace on the remote, losing all hierarchies.
- by default, `--uses` YAML file is always uploaded.
- uploaded files are by default isolated across the runs. To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace.
:param uses: The config of the executor, it could be one of the followings:
* an Executor-level YAML file path (.yml, .yaml, .jaml)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When use it under Python, one can use the following values additionally:
- a Python dict that represents the config
- a text file stream has `.read()` interface
:param uses_after: The executor attached after the Peas described by --uses, typically used for receiving from all parallels, accepted type follows `--uses`
        :param uses_before: The executor attached before the Peas described by --uses, typically used for preprocessing before sending to all parallels, accepted type follows `--uses`
:param uses_internal: The config runs inside the Docker container.
Syntax and function are the same as `--uses`. This is designed when `--uses="docker://..."` this config is passed to
the Docker container.
:param volumes: The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
- If no split provided, then the basename of that directory will be mounted into container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
        :param workspace_id: the UUID for identifying the workspace. When not given, a random id will be assigned. Multiple Pea/Pod/Flow will work under the same workspace if they share the same `workspace-id`.
:return: a (new) Flow object with modification
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_pod
def add(
self,
needs: Optional[Union[str, Tuple[str], List[str]]] = None,
copy_flow: bool = True,
pod_role: 'PodRoleType' = PodRoleType.POD,
**kwargs,
) -> 'Flow':
"""
Add a Pod to the current Flow object and return the new modified Flow object.
The attribute of the Pod can be later changed with :py:meth:`set` or deleted with :py:meth:`remove`
.. # noqa: DAR401
:param needs: the name of the Pod(s) that this Pod receives data from.
One can also use 'gateway' to indicate the connection with the gateway.
:param pod_role: the role of the Pod, used for visualization and route planning
:param copy_flow: when set to true, then always copy the current Flow and do the modification on top of it then return, otherwise, do in-line modification
:param kwargs: other keyword-value arguments that the Pod CLI supports
:return: a (new) Flow object with modification
"""
op_flow = copy.deepcopy(self) if copy_flow else self
# pod naming logic
pod_name = kwargs.get('name', None)
if pod_name in op_flow._pod_nodes:
new_name = f'{pod_name}{len(op_flow._pod_nodes)}'
self.logger.debug(
f'"{pod_name}" is used in this Flow already! renamed it to "{new_name}"'
)
pod_name = new_name
if not pod_name:
pod_name = f'pod{len(op_flow._pod_nodes)}'
if not pod_name.isidentifier():
# hyphen - can not be used in the name
raise ValueError(
f'name: {pod_name} is invalid, please follow the python variable name conventions'
)
# needs logic
needs = op_flow._parse_endpoints(
op_flow, pod_name, needs, connect_to_last_pod=True
)
# set the kwargs inherit from `Flow(kwargs1=..., kwargs2=)`
for key, value in op_flow._common_kwargs.items():
if key not in kwargs:
kwargs[key] = value
# check if host is set to remote:port
if 'host' in kwargs:
m = re.match(_regex_port, kwargs['host'])
if (
kwargs.get('host', __default_host__) != __default_host__
and m
and 'port_expose' not in kwargs
):
kwargs['port_expose'] = m.group(2)
kwargs['host'] = m.group(1)
# update kwargs of this Pod
kwargs.update(dict(name=pod_name, pod_role=pod_role, num_part=len(needs)))
parser = set_pod_parser()
if pod_role == PodRoleType.GATEWAY:
parser = set_gateway_parser()
args = ArgNamespace.kwargs2namespace(kwargs, parser)
# pod workspace if not set then derive from flow workspace
args.workspace = os.path.abspath(args.workspace or self.workspace)
op_flow._pod_nodes[pod_name] = PodFactory.build_pod(args, needs)
op_flow.last_pod = pod_name
return op_flow
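    # Hedged example (added by the editor; names are illustrative, not from the
    # original file): a chain such as
    #   Flow().add(name='encoder', parallel=2).add(name='indexer', needs='encoder')
    # builds two Pods where `indexer` receives from `encoder`.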
def inspect(self, name: str = 'inspect', *args, **kwargs) -> 'Flow':
"""Add an inspection on the last changed Pod in the Flow
Internally, it adds two Pods to the Flow. But don't worry, the overhead is minimized and you
can remove them by simply using `Flow(inspect=FlowInspectType.REMOVE)` before using the Flow.
.. highlight:: bash
.. code-block:: bash
Flow -- PUB-SUB -- BasePod(_pass) -- Flow
|
-- PUB-SUB -- InspectPod (Hanging)
In this way, :class:`InspectPod` looks like a simple ``_pass`` from outside and
does not introduce side-effects (e.g. changing the socket type) to the original Flow.
The original incoming and outgoing socket types are preserved.
This function is very handy for introducing an Evaluator into the Flow.
.. seealso::
:meth:`gather_inspect`
:param name: name of the Pod
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the new instance of the Flow
"""
_last_pod = self.last_pod
op_flow = self.add(
name=name, needs=_last_pod, pod_role=PodRoleType.INSPECT, *args, **kwargs
)
# now remove uses and add an auxiliary Pod
if 'uses' in kwargs:
kwargs.pop('uses')
op_flow = op_flow.add(
name=f'_aux_{name}',
needs=_last_pod,
pod_role=PodRoleType.INSPECT_AUX_PASS,
*args,
**kwargs,
)
# register any future connection to _last_pod by the auxiliary Pod
op_flow._inspect_pods[_last_pod] = op_flow.last_pod
return op_flow
def gather_inspect(
self,
name: str = 'gather_inspect',
include_last_pod: bool = True,
*args,
**kwargs,
) -> 'Flow':
"""Gather all inspect Pods output into one Pod. When the Flow has no inspect Pod then the Flow itself
is returned.
.. note::
If ``--no-inspect`` is **not** given, then :meth:`gather_inspect` is auto called before :meth:`build`. So
in general you don't need to manually call :meth:`gather_inspect`.
:param name: the name of the gather Pod
:param include_last_pod: if to include the last modified Pod in the Flow
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the modified Flow or the copy of it
.. seealso::
:meth:`inspect`
"""
needs = [k for k, v in self._pod_nodes.items() if v.role == PodRoleType.INSPECT]
if needs:
if include_last_pod:
needs.append(self.last_pod)
return self.add(
name=name,
needs=needs,
pod_role=PodRoleType.JOIN_INSPECT,
*args,
**kwargs,
)
else:
# no inspect node is in the graph, return the current graph
return self
def build(self, copy_flow: bool = False) -> 'Flow':
"""
Build the current Flow and make it ready to use
.. note::
No need to manually call it since 0.0.8. When using Flow with the
context manager, or using :meth:`start`, :meth:`build` will be invoked.
:param copy_flow: when set to true, then always copy the current Flow and do the modification on top of it then return, otherwise, do in-line modification
:return: the current Flow (by default)
.. note::
``copy_flow=True`` is recommended if you are building the same Flow multiple times in a row. e.g.
.. highlight:: python
.. code-block:: python
f = Flow()
with f:
f.index()
with f.build(copy_flow=True) as fl:
fl.search()
.. # noqa: DAR401
"""
op_flow = copy.deepcopy(self) if copy_flow else self
if op_flow.args.inspect == FlowInspectType.COLLECT:
op_flow.gather_inspect(copy_flow=False)
if 'gateway' not in op_flow._pod_nodes:
op_flow._add_gateway(needs={op_flow.last_pod})
# construct a map with a key a start node and values an array of its end nodes
_outgoing_map = defaultdict(list)
# if set no_inspect then all inspect related nodes are removed
if op_flow.args.inspect == FlowInspectType.REMOVE:
op_flow._pod_nodes = {
k: v for k, v in op_flow._pod_nodes.items() if not v.role.is_inspect
}
reverse_inspect_map = {v: k for k, v in op_flow._inspect_pods.items()}
for end, pod in op_flow._pod_nodes.items():
# if an endpoint is being inspected, then replace it with inspected Pod
# but not those inspect related node
if op_flow.args.inspect.is_keep:
pod.needs = set(
ep if pod.role.is_inspect else op_flow._inspect_pods.get(ep, ep)
for ep in pod.needs
)
else:
pod.needs = set(reverse_inspect_map.get(ep, ep) for ep in pod.needs)
for start in pod.needs:
if start not in op_flow._pod_nodes:
raise FlowMissingPodError(
f'{start} is not in this flow, misspelled name?'
)
_outgoing_map[start].append(end)
op_flow = _build_flow(op_flow, _outgoing_map)
hanging_pods = _hanging_pods(op_flow)
if hanging_pods:
op_flow.logger.warning(
f'{hanging_pods} are hanging in this flow with no pod receiving from them, '
f'you may want to double check if it is intentional or some mistake'
)
op_flow._build_level = FlowBuildLevel.GRAPH
return op_flow
def __call__(self, *args, **kwargs):
"""Builds the Flow
:param args: args for build
:param kwargs: kwargs for build
:return: the built Flow
"""
return self.build(*args, **kwargs)
def __enter__(self):
class CatchAllCleanupContextManager:
"""
This context manager guarantees, that the :method:``__exit__`` of the
sub context is called, even when there is an Exception in the
:method:``__enter__``.
:param sub_context: The context, that should be taken care of.
"""
def __init__(self, sub_context):
self.sub_context = sub_context
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
self.sub_context.__exit__(exc_type, exc_val, exc_tb)
with CatchAllCleanupContextManager(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
super().__exit__(exc_type, exc_val, exc_tb)
# unset all envs to avoid any side-effect
if self.args.env:
for k in self.args.env.keys():
os.unsetenv(k)
if 'gateway' in self._pod_nodes:
self._pod_nodes.pop('gateway')
self._build_level = FlowBuildLevel.EMPTY
self.logger.success(
f'flow is closed and all resources are released, current build level is {self._build_level}'
)
self.logger.close()
def start(self):
"""Start to run all Pods in this Flow.
Remember to close the Flow with :meth:`close`.
Note that this method has a timeout of ``timeout_ready`` set in CLI,
which is inherited all the way from :class:`jina.peapods.peas.BasePea`
.. # noqa: DAR401
:return: this instance
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
# set env only before the Pod get started
if self.args.env:
for k, v in self.args.env.items():
os.environ[k] = str(v)
for k, v in self:
v.args.noblock_on_start = True
if not getattr(v.args, 'external', False):
self.enter_context(v)
for k, v in self:
try:
if not getattr(v.args, 'external', False):
v.wait_start_success()
except Exception as ex:
self.logger.error(
f'{k}:{v!r} can not be started due to {ex!r}, Flow is aborted'
)
self.close()
raise
self.logger.info(
f'{self.num_pods} Pods (i.e. {self.num_peas} Peas) are running in this Flow'
)
self._build_level = FlowBuildLevel.RUNNING
self._show_success_message()
return self
@property
def num_pods(self) -> int:
"""Get the number of Pods in this Flow
.. # noqa: DAR201"""
return len(self._pod_nodes)
@property
def num_peas(self) -> int:
"""Get the number of peas (parallel count) in this Flow
.. # noqa: DAR201"""
return sum(v.num_peas for v in self._pod_nodes.values())
def __eq__(self, other: 'Flow') -> bool:
"""
Compare the topology of a Flow with another Flow.
Identification is defined by whether two flows share the same set of edges.
:param other: the second Flow object
:return: result of equality check
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
a = self.build()
else:
a = self
if other._build_level.value < FlowBuildLevel.GRAPH.value:
b = other.build()
else:
b = other
return a._pod_nodes == b._pod_nodes
@property
@build_required(FlowBuildLevel.GRAPH)
def client(self) -> 'BaseClient':
"""Return a :class:`BaseClient` object attach to this Flow.
.. # noqa: DAR201"""
self.args.port_expose = self.port_expose
self.args.host = self.host
self.args.show_progress = True
return Client(self.args)
@property
def _mermaid_str(self):
mermaid_graph = [
"%%{init: {'theme': 'base', "
"'themeVariables': { 'primaryColor': '#32C8CD', "
"'edgeLabelBackground':'#fff', 'clusterBkg': '#FFCC66'}}}%%",
'graph LR',
]
start_repl = {}
end_repl = {}
for node, v in self._pod_nodes.items():
if not v.is_singleton and v.role != PodRoleType.GATEWAY:
if v.args.replicas == 1:
mermaid_graph.append(
f'subgraph sub_{node} ["{node} ({v.args.parallel})"]'
)
else:
mermaid_graph.append(
f'subgraph sub_{node} ["{node} ({v.args.replicas})({v.args.parallel})"]'
)
if v.is_head_router:
head_router = node + '_HEAD'
end_repl[node] = (head_router, '((fa:fa-random))')
if v.is_tail_router:
tail_router = node + '_TAIL'
start_repl[node] = (tail_router, '((fa:fa-random))')
for i in range(v.args.replicas):
if v.is_head_router:
head_replica_router = node + f'_{i}_HEAD'
if v.args.replicas == 1:
end_repl[node] = (head_replica_router, '((fa:fa-random))')
if v.is_tail_router:
tail_replica_router = node + f'_{i}_TAIL'
if v.args.replicas == 1:
start_repl[node] = (tail_replica_router, '((fa:fa-random))')
p_r = '((%s))'
p_e = '[[%s]]'
if v.args.replicas > 1:
mermaid_graph.append(
f'\t{head_router}{p_r % "head"}:::pea-->{head_replica_router}{p_e % "replica_head"}:::pea'
)
mermaid_graph.append(
f'\t{tail_replica_router}{p_r % "replica_tail"}:::pea-->{tail_router}{p_e % "tail"}:::pea'
)
for j in range(v.args.parallel):
r = node
if v.args.replicas > 1:
r += f'_{i}_{j}'
elif v.args.parallel > 1:
r += f'_{j}'
if v.is_head_router:
mermaid_graph.append(
f'\t{head_replica_router}{p_r % "head"}:::pea-->{r}{p_e % r}:::pea'
)
if v.is_tail_router:
mermaid_graph.append(
f'\t{r}{p_e % r}:::pea-->{tail_replica_router}{p_r % "tail"}:::pea'
)
mermaid_graph.append('end')
for node, v in self._pod_nodes.items():
ed_str = str(v.head_args.socket_in).split('_')[0]
for need in sorted(v.needs):
edge_str = ''
if need in self._pod_nodes:
st_str = str(self._pod_nodes[need].tail_args.socket_out).split('_')[
0
]
edge_str = f'|{st_str}-{ed_str}|'
_s = start_repl.get(need, (need, f'({need})'))
_e = end_repl.get(node, (node, f'({node})'))
_s_role = self._pod_nodes[need].role
_e_role = self._pod_nodes[node].role
line_st = '-->'
if _s_role in {PodRoleType.INSPECT, PodRoleType.JOIN_INSPECT}:
_s = start_repl.get(need, (need, f'{{{{{need}}}}}'))
if _e_role == PodRoleType.GATEWAY:
_e = ('gateway_END', f'({node})')
elif _e_role in {PodRoleType.INSPECT, PodRoleType.JOIN_INSPECT}:
_e = end_repl.get(node, (node, f'{{{{{node}}}}}'))
if _s_role == PodRoleType.INSPECT or _e_role == PodRoleType.INSPECT:
line_st = '-.->'
mermaid_graph.append(
f'{_s[0]}{_s[1]}:::{str(_s_role)} {line_st} {edge_str}{_e[0]}{_e[1]}:::{str(_e_role)}'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.POD)} fill:#32C8CD,stroke:#009999'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.INSPECT)} fill:#ff6666,color:#fff'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.JOIN_INSPECT)} fill:#ff6666,color:#fff'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.GATEWAY)} fill:#6E7278,color:#fff'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.INSPECT_AUX_PASS)} fill:#fff,color:#000,stroke-dasharray: 5 5'
)
mermaid_graph.append('classDef pea fill:#009999,stroke:#1E6E73')
return '\n'.join(mermaid_graph)
def plot(
self,
output: Optional[str] = None,
vertical_layout: bool = False,
inline_display: bool = False,
build: bool = True,
copy_flow: bool = False,
) -> 'Flow':
"""
Visualize the Flow up to the current point
If a file name is provided it will create a jpg image with that name,
otherwise it will display the URL for mermaid.
If called within IPython notebook, it will be rendered inline,
otherwise an image will be created.
Example,
.. highlight:: python
.. code-block:: python
flow = Flow().add(name='pod_a').plot('flow.svg')
:param output: a filename specifying the name of the image to be created,
the suffix svg/jpg determines the file type of the output image
:param vertical_layout: top-down or left-right layout
:param inline_display: show image directly inside the Jupyter Notebook
        :param build: build the Flow before plotting so the gateway connection can be shown properly
:param copy_flow: when set to true, then always copy the current Flow and
do the modification on top of it then return, otherwise, do in-line modification
:return: the Flow
"""
# deepcopy causes the below error while reusing a Flow in Jupyter
# 'Pickling an AuthenticationString object is disallowed for security reasons'
op_flow = copy.deepcopy(self) if copy_flow else self
if build:
op_flow.build(False)
mermaid_str = op_flow._mermaid_str
if vertical_layout:
mermaid_str = mermaid_str.replace('graph LR', 'graph TD')
image_type = 'svg'
if output and output.endswith('jpg'):
image_type = 'jpg'
url = op_flow._mermaid_to_url(mermaid_str, image_type)
showed = False
if inline_display:
try:
from IPython.display import display, Image
display(Image(url=url))
showed = True
except:
# no need to panic users
pass
if output:
download_mermaid_url(url, output)
elif not showed:
op_flow.logger.info(f'flow visualization: {url}')
return self
def _ipython_display_(self):
"""Displays the object in IPython as a side effect"""
self.plot(
inline_display=True, build=(self._build_level != FlowBuildLevel.GRAPH)
)
def _mermaid_to_url(self, mermaid_str: str, img_type: str) -> str:
"""
        Render the current Flow as a URL that points to an SVG image. It needs an internet connection.
:param mermaid_str: the mermaid representation
:param img_type: image type (svg/jpg)
        :return: the URL pointing to the rendered SVG
"""
if img_type == 'jpg':
img_type = 'img'
encoded_str = base64.b64encode(bytes(mermaid_str, 'utf-8')).decode('utf-8')
return f'https://mermaid.ink/{img_type}/{encoded_str}'
@property
@build_required(FlowBuildLevel.GRAPH)
def port_expose(self) -> int:
"""Return the exposed port of the gateway
.. # noqa: DAR201"""
return self._pod_nodes['gateway'].port_expose
@property
@build_required(FlowBuildLevel.GRAPH)
def host(self) -> str:
"""Return the local address of the gateway
.. # noqa: DAR201"""
return self._pod_nodes['gateway'].host
@property
@build_required(FlowBuildLevel.GRAPH)
def address_private(self) -> str:
"""Return the private IP address of the gateway for connecting from other machine in the same network
.. # noqa: DAR201"""
return get_internal_ip()
@property
@build_required(FlowBuildLevel.GRAPH)
def address_public(self) -> str:
"""Return the public IP address of the gateway for connecting from other machine in the public network
.. # noqa: DAR201"""
return get_public_ip()
def __iter__(self):
return self._pod_nodes.items().__iter__()
def _show_success_message(self):
if self._pod_nodes['gateway'].args.restful:
header = 'http://'
protocol = 'REST'
else:
header = 'tcp://'
protocol = 'gRPC'
address_table = [
f'\t🖥️ Local access:\t'
+ colored(
f'{header}{self.host}:{self.port_expose}', 'cyan', attrs='underline'
),
f'\t🔒 Private network:\t'
+ colored(
f'{header}{self.address_private}:{self.port_expose}',
'cyan',
attrs='underline',
),
]
if self.address_public:
address_table.append(
f'\t🌐 Public address:\t'
+ colored(
f'{header}{self.address_public}:{self.port_expose}',
'cyan',
attrs='underline',
)
)
self.logger.success(
f'🎉 Flow is ready to use, accepting {colored(protocol + " request", attrs="bold")}'
)
self.logger.info('\n' + '\n'.join(address_table))
def block(self):
"""Block the process until user hits KeyboardInterrupt"""
try:
threading.Event().wait()
except KeyboardInterrupt:
pass
def use_grpc_gateway(self, port: Optional[int] = None):
"""Change to use gRPC gateway for Flow IO.
You can change the gateway even in the runtime.
:param port: the new port number to expose
"""
self._switch_gateway('GRPCRuntime', port)
def _switch_gateway(self, gateway: str, port: int):
restful = gateway == 'RESTRuntime'
# globally register this at Flow level
self.args.restful = restful
if port:
self.args.port_expose = port
        # the Flow has already been built to graph level
if self._build_level >= FlowBuildLevel.GRAPH:
self['gateway'].args.restful = restful
self['gateway'].args.runtime_cls = gateway
if port:
self['gateway'].args.port_expose = port
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self['gateway'].close()
self.enter_context(self['gateway'])
self['gateway'].wait_start_success()
def use_rest_gateway(self, port: Optional[int] = None):
"""Change to use REST gateway for IO.
You can change the gateway even in the runtime.
:param port: the new port number to expose
"""
self._switch_gateway('RESTRuntime', port)
def __getitem__(self, item):
if isinstance(item, str):
return self._pod_nodes[item]
elif isinstance(item, int):
return list(self._pod_nodes.values())[item]
else:
raise TypeError(f'{typename(item)} is not supported')
@property
def workspace(self) -> str:
"""Return the workspace path of the flow.
.. # noqa: DAR201"""
return os.path.abspath(self.args.workspace or './')
@property
def workspace_id(self) -> Dict[str, str]:
"""Get all Pods' ``workspace_id`` values in a dict
.. # noqa: DAR201"""
return {
k: p.args.workspace_id for k, p in self if hasattr(p.args, 'workspace_id')
}
@workspace_id.setter
def workspace_id(self, value: str):
"""Set all Pods' ``workspace_id`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
for k, p in self:
if hasattr(p.args, 'workspace_id'):
p.args.workspace_id = value
args = getattr(p, 'peas_args', getattr(p, 'replicas_args', None))
if args is None:
raise ValueError(
f'could not find "peas_args" or "replicas_args" on {p}'
)
values = None
if isinstance(args, dict):
values = args.values()
elif isinstance(args, list):
values = args
for v in values:
if v and isinstance(v, argparse.Namespace):
v.workspace_id = value
if v and isinstance(v, List):
for i in v:
i.workspace_id = value
@property
def identity(self) -> Dict[str, str]:
"""Get all Pods' ``identity`` values in a dict
.. # noqa: DAR201
"""
return {k: p.args.identity for k, p in self}
@identity.setter
def identity(self, value: str):
"""Set all Pods' ``identity`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
# Re-initiating logger with new identity
self.logger = JinaLogger(self.__class__.__name__, **vars(self.args))
for _, p in self:
p.args.identity = value
# for backward support
join = needs
def rolling_update(self, pod_name: str, dump_path: Optional[str] = None):
"""
Reload Pods sequentially - only used for compound pods.
:param dump_path: the path from which to read the dump data
:param pod_name: pod to update
"""
# TODO: By design after the Flow object started, Flow shouldn't have memory access to its sub-objects anymore.
# All controlling should be issued via Network Request, not via memory access.
# In the current master, we have Flow.rolling_update() & Flow.dump() method avoid the above design.
# Avoiding this design make the whole system NOT cloud-native.
warnings.warn(
'This function is experimental and facing potential refactoring',
FutureWarning,
)
compound_pod = self._pod_nodes[pod_name]
if isinstance(compound_pod, CompoundPod):
compound_pod.rolling_update(dump_path)
else:
raise ValueError(
f'The BasePod {pod_name} is not a CompoundPod and does not support updating'
)
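# --- Hedged usage sketch added by the editor; not part of the original file ---
# Typical lifecycle, following the docstrings above (names are illustrative):
#
#   f = Flow(port_expose=12345).add(name='step1').add(name='step2', needs='step1')
#   with f:        # build() + start() via the context manager
#       f.block()  # serve until KeyboardInterrupt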
| 40.391165
| 283
| 0.597073
|
794e9abe4023e306179ab9ad9289f65987599dc1
| 2,067
|
py
|
Python
|
deprecated/stageI/run_exp_mscoco.py
|
yao-zhao/EDGAN
|
b3164fb9d5d9b571b52328b7dd187b748d5a304d
|
[
"MIT"
] | 3
|
2017-05-30T03:57:59.000Z
|
2019-04-15T07:05:17.000Z
|
deprecated/stageI/run_exp_mscoco.py
|
yao-zhao/EDGAN
|
b3164fb9d5d9b571b52328b7dd187b748d5a304d
|
[
"MIT"
] | null | null | null |
deprecated/stageI/run_exp_mscoco.py
|
yao-zhao/EDGAN
|
b3164fb9d5d9b571b52328b7dd187b748d5a304d
|
[
"MIT"
] | null | null | null |
from __future__ import division
from __future__ import print_function
import dateutil
import dateutil.tz
import datetime
import argparse
import pprint
from shutil import copyfile
import os
from misc.dataloader import DataLoader
from stageI.model import CondGAN
from stageI.trainer_mscoco import CondGANTrainer_mscoco
from misc.utils import mkdir_p
from misc.config import cfg, cfg_from_file
def parse_args():
parser = argparse.ArgumentParser(description='Train a GAN network')
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='stageI/cfg/mscoco.yml', type=str)
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=-1, type=int)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.gpu_id != -1:
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
tfrecord_path = 'Data/%s/%s.tfrecords' % \
(cfg.DATASET_NAME, cfg.DATASET.TFRECORDS)
crop_size = cfg.TRAIN.LR_IMSIZE
dataset = DataLoader(tfrecord_path, [crop_size, crop_size],
num_examples=cfg.DATASET.NUM_EXAMPLES)
if cfg.TRAIN.FLAG:
ckt_logs_dir = "ckt_logs/%s/%s_%s" % \
(cfg.DATASET_NAME, cfg.CONFIG_NAME, timestamp)
mkdir_p(ckt_logs_dir)
else:
s_tmp = cfg.TRAIN.PRETRAINED_MODEL
ckt_logs_dir = s_tmp[:s_tmp.find('.ckpt')]
model = CondGAN(
image_shape=dataset.image_shape
)
copyfile(os.path.join('stageI', 'cfg', 'mscoco.yml'), os.path.join(ckt_logs_dir, 'mscoco.yml'))
algo = CondGANTrainer_mscoco(
model=model,
dataset=dataset,
ckt_logs_dir=ckt_logs_dir
)
if cfg.TRAIN.FLAG:
algo.train()
else:
algo.evaluate()
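# --- Hedged usage note (not part of the original script) ---
# Typical invocation, relying on the defaults defined in parse_args() above:
#
#   python run_exp_mscoco.py --cfg stageI/cfg/mscoco.yml --gpu 0
#
# --cfg is merged into `cfg` by cfg_from_file(); --gpu overrides cfg.GPU_ID
# only when it differs from the default of -1.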
| 29.528571
| 99
| 0.654088
|
794e9b01871545eeb8d49c1339fa0cb3b38f04fb
| 5,807
|
py
|
Python
|
core/dbt/config/renderer.py
|
sethwoodworth/dbt
|
68babfb4bbd016e198bb09ac8dfd5dc71760ef7e
|
[
"Apache-2.0"
] | 1
|
2020-10-25T00:13:50.000Z
|
2020-10-25T00:13:50.000Z
|
core/dbt/config/renderer.py
|
azhard/dbt
|
9cd7cbc9e35e5a7c8c4f17a3d113263f4421ab55
|
[
"Apache-2.0"
] | null | null | null |
core/dbt/config/renderer.py
|
azhard/dbt
|
9cd7cbc9e35e5a7c8c4f17a3d113263f4421ab55
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, Any, Tuple, Optional, Union
from dbt.clients.jinja import get_rendered
from dbt.exceptions import DbtProjectError
from dbt.exceptions import RecursionException
from dbt.node_types import NodeType
from dbt.utils import deep_map
Keypath = Tuple[Union[str, int], ...]
class BaseRenderer:
def __init__(self, context: Dict[str, Any]) -> None:
self.context = context
@property
def name(self):
return 'Rendering'
def should_render_keypath(self, keypath: Keypath) -> bool:
return True
def render_entry(self, value: Any, keypath: Keypath) -> Any:
if not self.should_render_keypath(keypath):
return value
return self.render_value(value, keypath)
def render_value(
self, value: Any, keypath: Optional[Keypath] = None
) -> Any:
# keypath is ignored.
# if it wasn't read as a string, ignore it
if not isinstance(value, str):
return value
return get_rendered(value, self.context, native=True)
def render_data(
self, data: Dict[str, Any]
) -> Dict[str, Any]:
try:
return deep_map(self.render_entry, data)
except RecursionException:
raise DbtProjectError(
f'Cycle detected: {self.name} input has a reference to itself',
project=data
)
class DbtProjectYamlRenderer(BaseRenderer):
def __init__(
self, context: Dict[str, Any], version: Optional[int] = None
) -> None:
super().__init__(context)
self.version: Optional[int] = version
@property
def name(self):
        return 'Project config'
def get_package_renderer(self) -> BaseRenderer:
return PackageRenderer(self.context)
def should_render_keypath_v1(self, keypath: Keypath) -> bool:
if not keypath:
return True
first = keypath[0]
# run hooks
if first in {'on-run-start', 'on-run-end', 'query-comment'}:
return False
# models have two things to avoid
        if first in {'seeds', 'models', 'snapshots'}:
# model-level hooks
if 'pre-hook' in keypath or 'post-hook' in keypath:
return False
# model-level 'vars' declarations
if 'vars' in keypath:
return False
return True
def should_render_keypath_v2(self, keypath: Keypath) -> bool:
if not keypath:
return True
first = keypath[0]
# run hooks are not rendered
if first in {'on-run-start', 'on-run-end', 'query-comment'}:
return False
# don't render vars blocks until runtime
if first == 'vars':
return False
        if first in {'seeds', 'models', 'snapshots'}:
# model-level hooks
if 'pre-hook' in keypath or 'post-hook' in keypath:
return False
# model-level 'vars' declarations
if 'vars' in keypath:
return False
return True
def should_render_keypath(self, keypath: Keypath) -> bool:
if self.version == 2:
return self.should_render_keypath_v2(keypath)
else: # could be None
return self.should_render_keypath_v1(keypath)
def render_data(
self, data: Dict[str, Any]
) -> Dict[str, Any]:
if self.version is None:
self.version = data.get('current-version')
try:
return deep_map(self.render_entry, data)
except RecursionException:
raise DbtProjectError(
f'Cycle detected: {self.name} input has a reference to itself',
project=data
)
class ProfileRenderer(BaseRenderer):
@property
def name(self):
        return 'Profile'
class SchemaYamlRenderer(BaseRenderer):
DOCUMENTABLE_NODES = frozenset(
n.pluralize() for n in NodeType.documentable()
)
@property
def name(self):
return 'Rendering yaml'
def _is_norender_key(self, keypath: Keypath) -> bool:
"""
models:
- name: blah
- description: blah
tests: ...
- columns:
- name:
- description: blah
tests: ...
Return True if it's tests or description - those aren't rendered
"""
if len(keypath) >= 2 and keypath[1] in ('tests', 'description'):
return True
if (
len(keypath) >= 4 and
keypath[1] == 'columns' and
keypath[3] in ('tests', 'description')
):
return True
return False
# don't render descriptions or test keyword arguments
def should_render_keypath(self, keypath: Keypath) -> bool:
if len(keypath) < 2:
return True
if keypath[0] not in self.DOCUMENTABLE_NODES:
return True
if len(keypath) < 3:
return True
if keypath[0] == NodeType.Source.pluralize():
if keypath[2] == 'description':
return False
if keypath[2] == 'tables':
if self._is_norender_key(keypath[3:]):
return False
elif keypath[0] == NodeType.Macro.pluralize():
if keypath[2] == 'arguments':
if self._is_norender_key(keypath[3:]):
return False
elif self._is_norender_key(keypath[1:]):
return False
else: # keypath[0] in self.DOCUMENTABLE_NODES:
if self._is_norender_key(keypath[1:]):
return False
return True
class PackageRenderer(BaseRenderer):
@property
def name(self):
return 'Packages config'
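# --- Hedged usage sketch (not part of the dbt source above) ---
# Demonstrates the keypath filtering implemented by SchemaYamlRenderer:
# descriptions and tests under documentable nodes are left unrendered, while
# other string values are passed through Jinja. The empty context is an
# illustrative assumption.
if __name__ == '__main__':
    renderer = SchemaYamlRenderer(context={})
    assert not renderer.should_render_keypath(('models', 0, 'description'))
    assert renderer.should_render_keypath(('models', 0, 'name'))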
| 28.747525
| 79
| 0.569141
|
794e9b1ddfbdb4f03412dca5be3ba702143be01f
| 6,985
|
py
|
Python
|
talon_one/models/feature_flags.py
|
talon-one/talon_one.py
|
f863bb3c2cc5ddc94d9227adcf14947b2ea7db41
|
[
"MIT"
] | 1
|
2021-03-05T06:41:26.000Z
|
2021-03-05T06:41:26.000Z
|
talon_one/models/feature_flags.py
|
talon-one/talon_one.py
|
f863bb3c2cc5ddc94d9227adcf14947b2ea7db41
|
[
"MIT"
] | 1
|
2021-09-07T08:56:58.000Z
|
2021-09-07T08:56:58.000Z
|
talon_one/models/feature_flags.py
|
talon-one/talon_one.py
|
f863bb3c2cc5ddc94d9227adcf14947b2ea7db41
|
[
"MIT"
] | 1
|
2019-05-21T10:27:54.000Z
|
2019-05-21T10:27:54.000Z
|
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class FeatureFlags(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'account_id': 'int',
'loyalty': 'bool',
'coupons_without_count': 'bool',
'beta_effects': 'bool'
}
attribute_map = {
'account_id': 'accountId',
'loyalty': 'loyalty',
'coupons_without_count': 'coupons_without_count',
'beta_effects': 'betaEffects'
}
def __init__(self, account_id=None, loyalty=None, coupons_without_count=None, beta_effects=None, local_vars_configuration=None): # noqa: E501
"""FeatureFlags - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._account_id = None
self._loyalty = None
self._coupons_without_count = None
self._beta_effects = None
self.discriminator = None
self.account_id = account_id
if loyalty is not None:
self.loyalty = loyalty
if coupons_without_count is not None:
self.coupons_without_count = coupons_without_count
if beta_effects is not None:
self.beta_effects = beta_effects
@property
def account_id(self):
"""Gets the account_id of this FeatureFlags. # noqa: E501
The ID of the account that owns this entity. # noqa: E501
:return: The account_id of this FeatureFlags. # noqa: E501
:rtype: int
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""Sets the account_id of this FeatureFlags.
The ID of the account that owns this entity. # noqa: E501
:param account_id: The account_id of this FeatureFlags. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and account_id is None: # noqa: E501
raise ValueError("Invalid value for `account_id`, must not be `None`") # noqa: E501
self._account_id = account_id
@property
def loyalty(self):
"""Gets the loyalty of this FeatureFlags. # noqa: E501
Whether the account has access to the loyalty features or not # noqa: E501
:return: The loyalty of this FeatureFlags. # noqa: E501
:rtype: bool
"""
return self._loyalty
@loyalty.setter
def loyalty(self, loyalty):
"""Sets the loyalty of this FeatureFlags.
Whether the account has access to the loyalty features or not # noqa: E501
:param loyalty: The loyalty of this FeatureFlags. # noqa: E501
:type: bool
"""
self._loyalty = loyalty
@property
def coupons_without_count(self):
"""Gets the coupons_without_count of this FeatureFlags. # noqa: E501
Whether the account queries coupons with or without total result size # noqa: E501
:return: The coupons_without_count of this FeatureFlags. # noqa: E501
:rtype: bool
"""
return self._coupons_without_count
@coupons_without_count.setter
def coupons_without_count(self, coupons_without_count):
"""Sets the coupons_without_count of this FeatureFlags.
Whether the account queries coupons with or without total result size # noqa: E501
:param coupons_without_count: The coupons_without_count of this FeatureFlags. # noqa: E501
:type: bool
"""
self._coupons_without_count = coupons_without_count
@property
def beta_effects(self):
"""Gets the beta_effects of this FeatureFlags. # noqa: E501
Whether the account can test beta effects or not # noqa: E501
:return: The beta_effects of this FeatureFlags. # noqa: E501
:rtype: bool
"""
return self._beta_effects
@beta_effects.setter
def beta_effects(self, beta_effects):
"""Sets the beta_effects of this FeatureFlags.
Whether the account can test beta effects or not # noqa: E501
:param beta_effects: The beta_effects of this FeatureFlags. # noqa: E501
:type: bool
"""
self._beta_effects = beta_effects
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FeatureFlags):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, FeatureFlags):
return True
return self.to_dict() != other.to_dict()
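# --- Hedged usage sketch (not part of the generated client above) ---
# The flag values are illustrative assumptions.
if __name__ == '__main__':
    flags = FeatureFlags(account_id=42, loyalty=True, beta_effects=False)
    print(flags.to_dict())
    # account_id is required: constructing with account_id=None raises a
    # ValueError while client-side validation is enabled (the default).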
| 33.581731
| 647
| 0.627774
|
794e9b548a9825ac67ef37d111bd6ab1005f9d95
| 502
|
py
|
Python
|
automation/gui1.py
|
ArkAngeL43/automation
|
f8ad20470a3dd2a180c93d84d170dcdd7bb1c45f
|
[
"MIT"
] | 1
|
2021-07-10T15:43:22.000Z
|
2021-07-10T15:43:22.000Z
|
automation/gui1.py
|
ArkAngeL43/automation
|
f8ad20470a3dd2a180c93d84d170dcdd7bb1c45f
|
[
"MIT"
] | null | null | null |
automation/gui1.py
|
ArkAngeL43/automation
|
f8ad20470a3dd2a180c93d84d170dcdd7bb1c45f
|
[
"MIT"
] | null | null | null |
from tkinter import *
root = Tk()
root.geometry("400x400")
bg = PhotoImage(file = "pop.ppm")
# Show image using label
label1 = Label( root, image = bg)
label1.place(x = 0, y = 0)
label2 = Label( root, text = "Welcome")
label2.pack(pady = 50)
frame1 = Frame(root)
frame1.pack(pady = 20 )
button1 = Button(frame1,text="happy")
button1.pack(pady=20)
button2 = Button( frame1, text = "fucking")
button2.pack(pady = 20)
button3 = Button( frame1, text = "BDAYYYY MF")
button3.pack(pady = 20)
root.mainloop()
| 27.888889
| 46
| 0.689243
|
794e9bea78c76ce0e1c4bd01090211e6af6b34f1
| 3,644
|
py
|
Python
|
sbo_sphinx/jsdoc.py
|
safarijv/sbo-sphinx
|
7a8efb7c49488131c90c19ef1a1563f595630a36
|
[
"BSD-2-Clause"
] | 3
|
2015-06-28T16:21:05.000Z
|
2018-08-30T09:48:06.000Z
|
sbo_sphinx/jsdoc.py
|
safarijv/sbo-sphinx
|
7a8efb7c49488131c90c19ef1a1563f595630a36
|
[
"BSD-2-Clause"
] | 5
|
2015-01-29T22:12:52.000Z
|
2015-09-22T19:15:21.000Z
|
sbo_sphinx/jsdoc.py
|
safarijv/sbo-sphinx
|
7a8efb7c49488131c90c19ef1a1563f595630a36
|
[
"BSD-2-Clause"
] | null | null | null |
# encoding: utf-8
# Created by Jeremy Bowman at Thu Feb 6 17:41:45 2014
# Copyright (c) 2014 Safari Books Online, LLC. All rights reserved.
"""
Sphinx extension that uses jsdoc-toolkit and jsdoc-toolkit-rst-template to
generate JavaScript API documentation. It depends on having JSDoc-formatted
comments in the source code; without them this extension won't do much. There
are three relevant Sphinx configuration variables:
* ``jsdoc_source_root`` - The path relative to conf.py of the directory
containing all of the JavaScript files to be documented (``".."`` by default)
* ``jsdoc_output_root`` - The path relative to conf.py of the directory in
which to put the generated reST files (``"javascript"`` by default).
* ``jsdoc_exclude`` - A list of regular expressions; files and directories
matching any of them will be omitted from the documentation (an empty list
by default). The regular expressions should be strings using Java's regex
syntax.
The generated files are left in place between builds so they can be inspected.
The output directory should typically be added to .gitignore so the
intermediate files aren't accidentally committed.
External requirements: java, ant
"""
import os
from shutil import rmtree
from subprocess import Popen
from sphinx.errors import SphinxError
SOURCE_PATH = os.path.abspath(os.path.dirname(__file__))
class JSDocError(SphinxError):
category = 'jsdoc'
def generate_docs(app):
""" Generate the reST documentation files for the JavaScript code """
# Figure out the correct directories to use
config = app.config
config_dir = app.env.srcdir
    if not config.jsdoc_source_root:
        return
    javascript_root = os.path.join(config_dir, config.jsdoc_source_root)
    if javascript_root[-1] != os.path.sep:
        javascript_root += os.path.sep
output_root = os.path.join(config_dir, config.jsdoc_output_root)
execution_dir = os.path.join(config_dir, '..')
exclude = config.jsdoc_exclude
# Remove any files generated by earlier builds
cleanup(output_root)
# Generate the actual reST files
jsdoc_toolkit_dir = os.path.join(SOURCE_PATH, 'jsdoc-toolkit')
jsdoc_rst_dir = os.path.join(SOURCE_PATH, 'jsdoc-toolkit-rst-template')
build_xml_path = os.path.join(jsdoc_rst_dir, 'build.xml')
command = ['ant', '-f', build_xml_path,
'-Djsdoc-toolkit.dir=%s' % jsdoc_toolkit_dir,
'-Djs.src.dir=%s' % javascript_root,
'-Djs.rst.dir=%s' % output_root]
if exclude:
exclude_args = ['--exclude=\\"%s\\"' % path for path in exclude]
command.append('-Djs.exclude="%s"' % ' '.join(exclude_args))
try:
process = Popen(command, cwd=execution_dir)
process.wait()
except OSError:
raise JSDocError('Error running ant; is it installed?')
# Convert the absolute paths in the file listing to relative ones
path = os.path.join(output_root, 'files.rst')
with open(path, 'r') as f:
content = f.read()
content = content.replace(javascript_root, '')
with open(path, 'w') as f:
f.write(content)
def cleanup(output_root):
"""Remove any reST files which were generated by this extension"""
if os.path.exists(output_root):
if os.path.isdir(output_root):
rmtree(output_root)
else:
os.remove(output_root)
def setup(app):
"""Sphinx extension entry point"""
app.add_config_value('jsdoc_source_root', '..', 'env')
app.add_config_value('jsdoc_output_root', 'javascript', 'env')
app.add_config_value('jsdoc_exclude', [], 'env')
app.connect('builder-inited', generate_docs)
| 36.44
| 79
| 0.699232
|
794e9c7a13659b9babebcbc3c70a74bf1a3f13e9
| 454
|
py
|
Python
|
numba/rewrites/__init__.py
|
TejasAvinashShetty/numba
|
5f474010f8f50b3cf358125ba279d345ae5914ef
|
[
"BSD-2-Clause"
] | null | null | null |
numba/rewrites/__init__.py
|
TejasAvinashShetty/numba
|
5f474010f8f50b3cf358125ba279d345ae5914ef
|
[
"BSD-2-Clause"
] | null | null | null |
numba/rewrites/__init__.py
|
TejasAvinashShetty/numba
|
5f474010f8f50b3cf358125ba279d345ae5914ef
|
[
"BSD-2-Clause"
] | null | null | null |
import numba.core.errors as _errors
from numba.core.utils import PYVERSION as _PYVERSION
_moved_mod = "numba.core.rewrites"
if _PYVERSION >= (3, 7):
__getattr__ = _errors.deprecate_moved_module_getattr(__name__, _moved_mod)
else:
from numba.core.rewrites.registry import (
register_rewrite, # noqa: F401
rewrite_registry, # noqa: F401
Rewrite,
) # noqa: F401
_errors.deprecate_moved_module(__name__, _moved_mod)
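# --- Hedged usage note (not part of the original shim) ---
# Old-style imports through this module keep working but warn; new code
# should import from the relocated module directly:
#
#   from numba.core.rewrites import register_rewrite, Rewrite  # new location
#   from numba.rewrites import Rewrite                         # deprecated, warns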
| 28.375
| 78
| 0.729075
|
794e9c94793fe9e63bb19cb346a16ff45b0d00d6
| 804
|
py
|
Python
|
pylaas_core/interface/core/service_interface.py
|
Agi-dev/pylaas_core
|
c44866b5e57eb6f05f5b2b8d731f22d62a8c01c2
|
[
"MIT"
] | null | null | null |
pylaas_core/interface/core/service_interface.py
|
Agi-dev/pylaas_core
|
c44866b5e57eb6f05f5b2b8d731f22d62a8c01c2
|
[
"MIT"
] | 2
|
2021-03-25T21:30:41.000Z
|
2021-06-01T21:25:37.000Z
|
pylaas_core/interface/core/service_interface.py
|
Agi-dev/pylaas_core
|
c44866b5e57eb6f05f5b2b8d731f22d62a8c01c2
|
[
"MIT"
] | null | null | null |
import abc
class ServiceInterface(abc.ABC):
@abc.abstractmethod
def set_adapter(self, adapter):
"""Set service adapter
Args:
            adapter: the adapter instance to attach to this service
Returns:
cls
"""
pass
@abc.abstractmethod
def get_adapter(self):
"""Get current adapter
Returns:
adapter
"""
pass
@abc.abstractmethod
def has_adapter(self):
"""Check if an adapter exists
Returns:
bool
"""
pass
@staticmethod
@abc.abstractmethod
def get_service(service_id) -> 'ServiceInterface':
"""
Get service
Args:
service_id (string): service id to instantiate from container
Returns:
ServiceInterface
"""
| 17.866667
| 73
| 0.521144
|
794e9cb603a3b80791571c146902929442ac1bd2
| 66,506
|
py
|
Python
|
code_rt_sd/sd/plotlib.py
|
shibaji7/Collaboration_NCAR
|
c27e0ad8a1f0c6b2e66fa07e6cf57f98c4389899
|
[
"Apache-2.0"
] | 1
|
2021-11-12T14:40:49.000Z
|
2021-11-12T14:40:49.000Z
|
code_rt_sd/sd/plotlib.py
|
shibaji7/Collaboration_NCAR
|
c27e0ad8a1f0c6b2e66fa07e6cf57f98c4389899
|
[
"Apache-2.0"
] | null | null | null |
code_rt_sd/sd/plotlib.py
|
shibaji7/Collaboration_NCAR
|
c27e0ad8a1f0c6b2e66fa07e6cf57f98c4389899
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""plotlib.py: module is dedicated to plottting."""
__author__ = "Chakraborty, S."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "Chakraborty, S."
__email__ = "shibaji7@vt.edu"
__status__ = "Research"
import matplotlib
matplotlib.use("Agg")
import datetime as dt
from matplotlib.collections import LineCollection
from mpl_toolkits.axes_grid1 import SubplotDivider, Size
from mpl_toolkits.axes_grid1.mpl_axes import Axes
import matplotlib.pyplot as plt
from pylab import gca, gcf
import numpy as np
from matplotlib.transforms import Affine2D, Transform
import mpl_toolkits.axisartist.floating_axes as floating_axes
from matplotlib.projections import polar
from mpl_toolkits.axisartist.grid_finder import FixedLocator, DictFormatter
from types import MethodType
import glob
import pandas as pd
from dateutil import tz
from scipy.io import loadmat
import copy
from scipy.stats import skewnorm
from scipy.integrate import trapz
from scipy import signal
import sys
sys.path.append("sd_cartopy/")
import rad_fov
from fov import *
#from PyIF import te_compute as te
#from sklearn.feature_selection import mutual_info_regression as MIR
#from SALib.sample import saltelli
#from SALib.analyze import sobol
#from SALib.analyze import rbd_fast
import itertools
from math import pi
from matplotlib.legend_handler import HandlerPatch
font = {'size' : 8}
matplotlib.rc('font', **font)
class HandlerCircle(HandlerPatch):
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
center = 0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent
p = plt.Circle(xy=center, radius=orig_handle.radius)
self.update_prop(p, orig_handle, legend)
p.set_transform(trans)
return [p]
INT_F = 300
INT_R = 300
import utils
def textHighlighted(xy, text, ax=None, color="k", fontsize=None, xytext=(0,0),
zorder=None, text_alignment=(0,0), xycoords="data",
textcoords="offset points", **kwargs):
"""
Plot highlighted annotation (with a white lining)
Parameters
----------
xy : position of point to annotate
text : str text to show
ax : Optional[ ]
color : Optional[char]
        text color; default is "k"
fontsize : Optional [ ] text font size; default is None
xytext : Optional[ ] text position; default is (0, 0)
zorder : text zorder; default is None
text_alignment : Optional[ ]
xycoords : Optional[ ] xy coordinate[1]; default is "data"
textcoords : Optional[ ] text coordinate[2]; default is "offset points"
**kwargs :
Notes
-----
Belongs to class rbspFp.
References
----------
[1] see `matplotlib.pyplot.annotate
<http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.annotate>`)
[2] see `matplotlib.pyplot.annotate
<http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.annotate>`)
"""
if ax is None: ax = gca()
    text_path = matplotlib.text.TextPath((0, 0), text, size=fontsize, **kwargs)
    p1 = matplotlib.patches.PathPatch(text_path, ec="w", lw=4, fc="w", alpha=0.7,
            zorder=zorder, transform=matplotlib.transforms.IdentityTransform())
    p2 = matplotlib.patches.PathPatch(text_path, ec="none", fc=color, zorder=zorder,
            transform=matplotlib.transforms.IdentityTransform())
    offsetbox2 = matplotlib.offsetbox.AuxTransformBox(matplotlib.transforms.IdentityTransform())
    offsetbox2.add_artist(p1)
    offsetbox2.add_artist(p2)
    ab = matplotlib.offsetbox.AnnotationBbox(offsetbox2, xy, xybox=xytext, xycoords=xycoords,
            boxcoords=textcoords, box_alignment=text_alignment, frameon=False)
ab.set_zorder(zorder)
ax.add_artist(ab)
return
def addColorbar(mappable, ax):
"""
Append colorbar to axes
Parameters
----------
mappable : a mappable object
ax : an axes object
Returns
-------
cbax : colorbar axes object
Notes
-----
This is mostly useful for axes created with :func:`curvedEarthAxes`.
"""
fig1 = ax.get_figure()
divider = SubplotDivider(fig1, *ax.get_geometry(), aspect=True)
# axes for colorbar
cbax = Axes(fig1, divider.get_position())
h = [Size.AxesX(ax), # main axes
Size.Fixed(0.05), # padding
Size.Fixed(0.1)] # colorbar
v = [Size.AxesY(ax)]
_ = divider.set_horizontal(h)
_ = divider.set_vertical(v)
_ = ax.set_axes_locator(divider.new_locator(nx=0, ny=0))
_ = cbax.set_axes_locator(divider.new_locator(nx=2, ny=0))
_ = fig1.add_axes(cbax)
_ = cbax.axis["left"].toggle(all=False)
_ = cbax.axis["top"].toggle(all=False)
_ = cbax.axis["bottom"].toggle(all=False)
_ = cbax.axis["right"].toggle(ticklabels=True, label=True)
_ = plt.colorbar(mappable, cax=cbax, shrink=0.1)
return cbax
def curvedEarthAxes(rect=111, fig=None, minground=0., maxground=2000, minalt=0,
maxalt=500, Re=6371., nyticks=5, nxticks=4):
"""
Create curved axes in ground-range and altitude
Parameters
----------
    rect : Optional[int] subplot specification
fig : Optional[pylab.figure object] (default to gcf)
minground : Optional[float]
maxground : Optional[int] maximum ground range [km]
minalt : Optional[int] lowest altitude limit [km]
maxalt : Optional[int] highest altitude limit [km]
Re : Optional[float] Earth radius in kilometers
nyticks : Optional[int] Number of y axis tick marks; default is 5
    nxticks : Optional[int] Number of x axis tick marks; default is 4
Returns
-------
ax : matplotlib.axes object containing formatting
    aax : matplotlib.axes object containing data
"""
ang = maxground / Re
minang = minground / Re
angran = ang - minang
angle_ticks = [(0, "{:.0f}".format(minground))]
while angle_ticks[-1][0] < angran:
tang = angle_ticks[-1][0] + 1./nxticks*angran
angle_ticks.append((tang, "{:.0f}".format((tang-minang)*Re)))
grid_locator1 = FixedLocator([v for v, s in angle_ticks])
tick_formatter1 = DictFormatter(dict(angle_ticks))
altran = float(maxalt - minalt)
alt_ticks = [(minalt+Re, "{:.0f}".format(minalt))]
while alt_ticks[-1][0] < Re+maxalt:
alt_ticks.append((altran / float(nyticks) + alt_ticks[-1][0],
"{:.0f}".format(altran / float(nyticks) +
alt_ticks[-1][0] - Re)))
_ = alt_ticks.pop()
grid_locator2 = FixedLocator([v for v, s in alt_ticks])
tick_formatter2 = DictFormatter(dict(alt_ticks))
tr_rotate = Affine2D().rotate(np.pi/2-ang/2)
tr_shift = Affine2D().translate(0, Re)
tr = polar.PolarTransform() + tr_rotate
grid_helper = floating_axes.GridHelperCurveLinear(tr, extremes=(0, angran, Re+minalt, Re+maxalt),
grid_locator1=grid_locator1, grid_locator2=grid_locator2, tick_formatter1=tick_formatter1,
tick_formatter2=tick_formatter2,)
if not fig: fig = plt.figure(figsize=(5,3), dpi=240)
ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)
# adjust axis
print("adjust ax")
ax1.set_ylabel(r"Height, $km$", fontdict={"size":2})
ax1.set_xlabel(r"Ground Range, $km$", fontdict={"size":2})
ax1.invert_xaxis()
ax1.minground = minground
ax1.maxground = maxground
ax1.minalt = minalt
ax1.maxalt = maxalt
ax1.Re = Re
fig.add_subplot(ax1, transform=tr)
# create a parasite axes whose transData in RA, cz
aux_ax = ax1.get_aux_axes(tr)
# for aux_ax to have a clip path as in ax
aux_ax.patch = ax1.patch
# but this has a side effect that the patch is drawn twice, and possibly
# over some other artists. So, we decrease the zorder a bit to prevent this.
ax1.patch.zorder=0.9
return ax1, aux_ax
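# --- Hedged usage sketch (not part of the original module) ---
# Minimal demonstration of curvedEarthAxes(): draw a flat "ray" at a constant
# 250 km altitude on the curved ground-range/altitude axes. The filename is an
# illustrative assumption; nothing here runs on import.
def _demo_curved_axes(fname="demo_curved_axes.png", Re=6371.):
    ax, aax = curvedEarthAxes(maxground=2000, maxalt=500, Re=Re)
    th = np.linspace(0, 2000 / Re, 200)           # ground range in radians
    aax.plot(th, np.full_like(th, Re + 250.), "b-", lw=0.8)
    ax.get_figure().savefig(fname, bbox_inches="tight")
    plt.close()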
def plot_edens(time, beam=None, maxground=2000, maxalt=500,
nel_cmap="jet", nel_lim=[10, 12], title=False,
fig=None, rect=111, ax=None, aax=None,plot_colorbar=True,
nel_rasterize=False):
"""
Plot electron density profile
Parameters
----------
time : datetime.datetime time of profile
beam : Optional[ ] beam number
maxground : Optional[int]
maximum ground range [km]
maxalt : Optional[int] highest altitude limit [km]
nel_cmap : Optional[str] color map name for electron density index coloring
nel_lim : Optional[list, int] electron density index plotting limits
title : Optional[bool] Show default title
fig : Optional[pylab.figure] object (default to gcf)
    rect : Optional[int] subplot specification
    ax : Optional[ ] Existing main axes
    aax : Optional[ ] Existing auxiliary axes
plot_colorbar : Optional[bool] Plot a colorbar
nel_rasterize : Optional[bool] Rasterize the electron density plot
Returns
-------
ax : matplotlib.axes object containing formatting
aax : matplotlib.axes object containing data
cbax : matplotlib.axes object containing colorbar
"""
return
def get_polar(d, Re=6371.):
""" Convert to polar coordinates """
th = d.grange / Re
r = d.height + Re
dop, sth, dth = d.dop, d.sth, d.dth
return th, r, dop, sth, dth
def plot_rays(dic, time, ti, beam, case, txt, maxground=2000, maxalt=500, step=1,
showrefract=False, nr_cmap="jet_r", nr_lim=[0.0, .1],
raycolor="0.3", title=True, zorder=2, alpha=1,
fig=None, rect=111, ax=None, aax=None):
"""
Plot ray paths
Parameters
----------
dic: str location of the data files
time: datetime.datetime time of rays
ti: int time index
beam: beam number
maxground : Optional[int] maximum ground range [km]
maxalt : Optional[int] highest altitude limit [km]
step : Optional[int] step between each plotted ray (in number of ray steps)
showrefract : Optional[bool] show refractive index along ray paths (supersedes raycolor)
nr_cmap : Optional[str] color map name for refractive index coloring
nr_lim : Optional[list, float] refractive index plotting limits
raycolor : Optional[float] color of ray paths
title : Optional[bool] Show default title
zorder : Optional[int]
alpha : Optional[int]
fig : Optional[pylab.figure] object (default to gcf)
    rect : Optional[int] subplot specification
    ax : Optional[ ] Existing main axes
    aax : Optional[ ] Existing auxiliary axes
Returns
-------
ax : matplotlib.axes object containing formatting
aax : matplotlib.axes object containing data
cbax : matplotlib.axes object containing colorbar
"""
if not ax and not aax: ax, aax = curvedEarthAxes(fig=fig, rect=rect, maxground=maxground, maxalt=maxalt)
else:
if hasattr(ax, "time"): time = ax.time
if hasattr(ax, "beam"): beam = ax.beam
files = glob.glob(dic + "ti({ti}).bm({bm}).elv(*).{case}.csv".format(ti=ti, bm=beam, case=case))
files.sort()
for f in files:
th, r, v, _, _ = get_polar(pd.read_csv(f))
if not showrefract: aax.plot(th, r, c=raycolor, zorder=zorder, alpha=alpha)
else:
points = np.array([th, r]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lcol = LineCollection( segments, zorder=zorder, alpha=alpha)
_ = lcol.set_cmap( nr_cmap )
_ = lcol.set_norm( plt.Normalize(*nr_lim) )
_ = lcol.set_array( v )
_ = aax.add_collection( lcol )
if title:
stitle = "%s UT"%time.strftime("%Y-%m-%d %H:%M")
ax.set_title( stitle )
ax.text(1.05, 0.5, txt, horizontalalignment="center", verticalalignment="center",
transform=ax.transAxes, rotation=90)
if showrefract:
cbax = addColorbar(lcol, ax)
_ = cbax.set_ylabel(r"$\Delta$ f")
else: cbax = None
ax.beam = beam
fig = ax.get_figure()
fig.savefig(dic + "rt.ti({ti}).bm({bm}).{case}.png".format(ti=ti, bm=beam, case=case), bbox_inches="tight")
plt.close()
return ax, aax, cbax
def plot_exp_rays(dic, time, beam, cat="bgc", maxground=2000, maxalt=300, step=1,
showrefract=False, nr_cmap="jet_r", nr_lim=[0.8, 1.],
raycolor="0.3", title=False, zorder=2, alpha=1,
fig=None, rect=111, ax=None, aax=None):
""" Plot ray paths (previous method) """
if not ax and not aax: ax, aax = curvedEarthAxes(fig=fig, rect=rect, maxground=maxground, maxalt=maxalt)
else:
if hasattr(ax, "time"): time = ax.time
if hasattr(ax, "beam"): beam = ax.beam
files = glob.glob(dic + "exp.{cat}.bm({bm}).elv(*).csv".format(cat=cat, bm=beam))
files.sort()
for f in files:
th, r, v, _, _ = get_polar(pd.read_csv(f))
if not showrefract: aax.plot(th, r, c=raycolor, zorder=zorder, alpha=alpha)
else:
points = np.array([th, r]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lcol = LineCollection( segments, zorder=zorder, alpha=alpha)
_ = lcol.set_cmap( nr_cmap )
_ = lcol.set_norm( plt.Normalize(*nr_lim) )
_ = lcol.set_array( v )
_ = aax.add_collection( lcol )
if title:
stitle = ""
ax.set_title( stitle )
if showrefract:
cbax = addColorbar(lcol, ax)
_ = cbax.set_ylabel(r"$\Delta$ f")
else: cbax = None
ax.beam = beam
fig = ax.get_figure()
fig.savefig(dic + "rt.exp.{cat}.bm({bm}).png".format(cat=cat, bm=beam), bbox_inches="tight")
plt.close()
return ax, aax, cbax
def plot_radstn(p,f,pz,fz,fname,lat,lon,t,zone="America/New_York"):
""" Plot radar vertical dataset """
fig = plt.figure(figsize=(4,4), dpi=120)
ax = fig.add_subplot(111)
ax.set_ylabel("Alt. [km]")
ax.set_xlabel(r"EDens [$cm^{-3}$]")
ax.semilogx(p, pz, "r")
ax.semilogx(f, fz, "r--")
ax.set_ylim(50, 130)
ax.set_xlim(1e2, 1e7)
sza = utils.calculate_sza(t, lat, lon, alt=300)
    l = t.replace(tzinfo=tz.gettz("UTC")).astimezone(tz.gettz(zone))
    ax.set_title(r"UT-%s"%(t.strftime("%Y-%m-%d %H:%M")))
    ax.text(1.05, 0.5, r"Loc:(%.1f,%.1f), $\chi$-%.1f, LT-%s"%(lat, lon, sza, l.strftime("%H:%M")),
horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, rotation=90)
fig.savefig(fname,bbox_inches="tight")
plt.close()
return
def plot_velocity_ts(dn, rad, bmnum):
""" Plot velocity TS data """
fig = plt.figure(figsize=(6,6), dpi=150)
axs = [fig.add_subplot(311), fig.add_subplot(312), fig.add_subplot(313)]
mkeys = ["vn", "vh", "vt"]
fmt = matplotlib.dates.DateFormatter("%H:%M")
fname = "data/sim/{dn}/{rad}/velocity.ts.csv".format(dn=dn.strftime("%Y.%m.%d.%H.%M"), rad=rad)
sdat = pd.read_csv(fname, parse_dates=["dn"])
axs[0].set_title("%s UT, Radar - %s, Beam - %d"%(dn.strftime("%Y.%m.%d.%H.%M"), rad, bmnum))
cols = ["r", "b", "k"]
labs = [r"$V_{d\eta}$", r"$V_{dh}$", r"$V_{t}$"]
I = 0
fname = "data/sim/{dn}/{rad}/sd_data.csv.gz".format(dn=dn.strftime("%Y.%m.%d.%H.%M"), rad=rad)
dat = utils.get_sd_data(fname, 15).dropna()
dat = dat.groupby("time").mean().reset_index()
for ax, mkey, col, lab in zip(axs, mkeys, cols, labs):
ax.set_ylabel(r"Velocity, $ms^{-1}$")
ax.set_xlabel("Time, UT")
ax.xaxis.set_major_formatter(fmt)
yerr = np.array([(mn, mx) for mn, mx in zip(sdat[mkey+"_min"], sdat[mkey+"_max"])]).T
ax.errorbar(sdat.dn, sdat[mkey], yerr=yerr,
mec=col, mfc=col, fmt="r^", ms=1.5, ls="None", ecolor=col,
capsize=1, capthick=.4, elinewidth=0.4,
alpha=0.5, label=lab)
if I == 2:
ax.plot(dat.time, dat.v, color="darkgreen", marker="o",
alpha=0.3, ls="None", markersize=0.5, label=r"$V_{sd}^{los}$")
ax.plot(dat.time, dat.v, color="darkred", marker="o",
alpha=0.3, ls="None", markersize=0.8)
ax.axhline(0, color="gray", ls="--", lw=0.6)
ax.legend(loc=1)
ax.set_ylim(10*int((np.min(sdat[mkey]+sdat[mkey+"_min"])/10)-1),
10*int((np.max(sdat[mkey]+sdat[mkey+"_max"])/10)+1))
ax.set_xlim(sdat.dn.tolist()[0], sdat.dn.tolist()[-1])
I += 1
fname = "data/sim/{dn}/{rad}/velocity.ts.png".format(dn=dn.strftime("%Y.%m.%d.%H.%M"), rad=rad)
fig.savefig(fname,bbox_inches="tight")
return
def plot_radstn_base(b,p,f,ht,fname,lat,lon,t,zone="America/New_York"):
""" Plot radar vertical dataset """
fig = plt.figure(figsize=(4,4), dpi=120)
ax = fig.add_subplot(111)
ax.set_ylabel("Alt. [km]")
ax.set_xlabel(r"EDens [$cm^{-3}$]")
ax.semilogx(b, ht, "k", label="Background")
ax.semilogx(p, ht, "r", label=r"$UT_{-1}$")
ax.semilogx(f, ht, "r--", label="UT")
ax.legend(loc=4)
ax.set_ylim(50, 130)
ax.set_xlim(1e2, 1e7)
sza = utils.calculate_sza(t, lat, lon, alt=300)
    l = t.replace(tzinfo=tz.gettz("UTC")).astimezone(tz.gettz(zone))
    ax.set_title(r"UT-%s"%(t.strftime("%Y-%m-%d %H:%M")))
    ax.text(1.05, 0.5, r"Loc:(%.1f,%.1f), $\chi$-%.1f, LT-%s"%(lat, lon, sza, l.strftime("%H:%M")),
horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, rotation=90)
fig.savefig(fname,bbox_inches="tight")
plt.close()
return
def plot_rays_base(dic, time, ti, beam, case, txt, maxground=2000, maxalt=500, step=1,
showrefract=False, nr_cmap="Blues", nr_lim=[-0.5, 0.5],
raycolor="0.3", title=True, zorder=2, alpha=1,
fig=None, rect=111, ax=None, aax=None, freq=12.):
"""
Plot ray paths
Parameters
----------
dic: str location of the data files
time: datetime.datetime time of rays
ti: int time index
beam: beam number
maxground : Optional[int] maximum ground range [km]
maxalt : Optional[int] highest altitude limit [km]
step : Optional[int] step between each plotted ray (in number of ray steps)
showrefract : Optional[bool] show refractive index along ray paths (supersedes raycolor)
nr_cmap : Optional[str] color map name for refractive index coloring
nr_lim : Optional[list, float] refractive index plotting limits
raycolor : Optional[float] color of ray paths
title : Optional[bool] Show default title
zorder : Optional[int]
alpha : Optional[int]
fig : Optional[pylab.figure] object (default to gcf)
    rect : Optional[int] subplot specification
    ax : Optional[ ] Existing main axes
    aax : Optional[ ] Existing auxiliary axes
Returns
-------
ax : matplotlib.axes object containing formatting
aax : matplotlib.axes object containing data
cbax : matplotlib.axes object containing colorbar
"""
if not ax and not aax: ax, aax = curvedEarthAxes(fig=fig, rect=rect, maxground=maxground, maxalt=maxalt)
else:
if hasattr(ax, "time"): time = ax.time
if hasattr(ax, "beam"): beam = ax.beam
files = glob.glob(dic + "ti({ti})_elv(*)_{case}.csv".format(ti="%02d"%ti, case=case))
files.sort()
Re = 6371.
for f in files:
th, r, v, _, _ = get_polar(pd.read_csv(f))
v = (0.5 * v * 3e8 / (freq * 1e6))
if not showrefract: aax.plot(th, r, c=raycolor, zorder=zorder, alpha=alpha)
else:
points = np.array([th, r]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lcol = LineCollection( segments, zorder=zorder, alpha=alpha)
_ = lcol.set_cmap( nr_cmap )
_ = lcol.set_norm( plt.Normalize(*nr_lim) )
_ = lcol.set_array( utils.smooth(v, window_len=21) )
_ = aax.add_collection( lcol )
aax.plot(np.arange(0,2000)/Re, np.ones(2000)*60+Re, color="b", ls="--", lw=0.5)
aax.plot(np.arange(0,2000)/Re, np.ones(2000)*95+Re, color="orange", ls="--", lw=0.5)
aax.plot(np.arange(0,2000)/Re, np.ones(2000)*130+Re, color="r", ls="--", lw=0.5)
if not showrefract and title:
stitle = "%s UT"%time.strftime("%Y-%m-%d %H:%M")
ax.set_title( stitle )
ax.text(1.05, 0.5, txt, horizontalalignment="center", verticalalignment="center",
transform=ax.transAxes, rotation=90)
if showrefract:
cbax = addColorbar(lcol, ax)
_ = cbax.set_ylabel(r"$\Delta$ V (m/s)")
stitle = "%s UT"%time.strftime("%Y-%m-%d %H:%M")+ "\n" + "Radar: BKS, Beam: %02d"%beam + "\n" +\
"Frequency: %.1f MHz"%freq + "\n"
ax.text(0.5, 0.8, stitle + txt + "(m/s)", horizontalalignment="center", verticalalignment="center",
transform=ax.transAxes)
else: cbax = None
ax.beam = beam
fig = ax.get_figure()
#fig.savefig(dic + "rt.ti({ti}).{case}.png".format(ti="%02d"%ti, case=case), bbox_inches="tight")
fig.savefig(dic + "rt.ti({ti}).{case}.png".format(ti="%02d"%ti, case=case))
plt.close()
return ax, aax, cbax
def plot_region_distribution(vd, ve, vf):
from scipy import stats
fig = plt.figure(figsize=(4,4), dpi=120)
ax = fig.add_subplot(111)
ax.hist(vd, bins=np.arange(0,1,.01), color="r", alpha=0.5, density=True, label=r"$\frac{v_D}{v_T}$", histtype="step")
ax.hist(ve, bins=np.arange(0,1,.01), color="b", alpha=0.5, density=True, label=r"$\frac{v_E}{v_T}$", histtype="step")
ax.hist(vf, bins=np.arange(0,1,.01), color="g", alpha=0.5, density=True, label=r"$\frac{v_F}{v_T}$", histtype="step")
ax.set_xlim(0,1)
ax.legend(loc=1)
ax.set_ylabel(r"Density ($\frac{V_x}{V_T}$)")
ax.set_xlabel(r"$\frac{V_x}{V_T}$")
fig.savefig("data/hist_reg.png", bbox_inches="tight")
return
def plot_distribution(vn, vf):
from scipy import stats
    fig = plt.figure(figsize=(4,4), dpi=120)
ax = fig.add_subplot(111)
ax.hist(vn, bins=np.arange(0,1,.01), color="r", alpha=0.5, density=True, label=r"$\frac{V_{d\eta}}{V_T}$", histtype="step")
ax.hist(vf, bins=np.arange(0,1,.01), color="b", alpha=0.5, density=True, label=r"$\frac{V_{dh}}{V_T}$", histtype="step")
ax.set_xlim(0,1)
ax.legend(loc=1)
ax.set_ylabel(r"Density $(\frac{V_x}{V_T})$")
ax.set_xlabel(r"$\frac{V_x}{V_T}$")
fig.savefig("data/hist.png", bbox_inches="tight")
return
def plot_htstogram(vd, ve, vf, vn, vh):
from scipy.stats import beta
fig = plt.figure(figsize=(6,3), dpi=150)
ax = fig.add_subplot(121)
#x = np.arange(0,1,0.001)
#a, b, _, _ = beta.fit(vn,floc=0,fscale=1)
ax.hist(vn, bins=np.arange(0,1,.01), color="r", alpha=0.5, density=True, label=r"$\frac{V_{d\eta}}{V_T}[\mu=%.2f]$"%np.mean(vn)
, histtype="step")
#ax.plot(x, beta.pdf(x, a, b), color="r", lw=0.8, label=r"$\frac{V_{d\eta}}{V_T}[\mu=%.2f]$"%(a/(a+b)))
ax.axvline(np.mean(vn), ls="--", color="r", lw=0.6)
#a, b, _, _ = beta.fit(vh,floc=0,fscale=1)
#ax.plot(x, beta.pdf(x, a, b), color="b", lw=0.8, label=r"$\frac{V_{dh}}{V_T}[\mu=%.2f]$"%(a/(a+b)))
ax.axvline(np.mean(vh), ls="--", color="b", lw=0.6)
ax.hist(vh, bins=np.arange(0,1,.01), color="b", alpha=0.5, density=True, label=r"$\frac{V_{dh}}{V_T}[\mu=%.2f]$"%np.mean(vh),
histtype="step")
ax.text(0.1,0.9, "(a)", horizontalalignment="center", verticalalignment="center", transform=ax.transAxes)
ax.set_xlim(0,1)
ax.set_ylim(0,20)
ax.legend(loc=1, prop={"size": 8})
ax.set_ylabel(r"Density $\left(\frac{V_x}{V_T}\right)$")
ax.set_xlabel(r"$\frac{V_x}{V_T}$")
ax = fig.add_subplot(122)
#a, b, _, _ = beta.fit(vd,floc=0,fscale=1)
#ax.plot(x, beta.pdf(x, a, b), color="r", lw=0.8, label=r"$\frac{V_D}{V_T}[\mu=%.2f]$"%(a/(a+b)))
ax.axvline(np.mean(vd), ls="--", color="r", lw=0.6)
ax.hist(vd, bins=np.arange(0,1,.01), color="r", alpha=0.5, density=True, label=r"$\frac{V_D}{V_T}[\mu=%.2f]$"%np.mean(vd),
histtype="step")
#a, b, _, _ = beta.fit(ve,floc=0,fscale=1)
#ax.plot(x, beta.pdf(x, a, b), color="g", lw=0.8, label=r"$\frac{V_E}{V_T}[\mu=%.2f]$"%(a/(a+b)))
ax.axvline(np.mean(ve), ls="--", color="g", lw=0.6)
ax.hist(ve, bins=np.arange(0,1,.01), color="g", alpha=0.5, density=True, label=r"$\frac{V_E}{V_T}[\mu=%.2f]$"%np.mean(ve),
histtype="step")
#a, b, _, _ = beta.fit(vf,floc=0,fscale=1)
#ax.plot(x, beta.pdf(x, a, b), color="b", lw=0.8, label=r"$\frac{V_F}{V_T}[\mu=%.2f]$"%(a/(a+b)))
ax.axvline(np.mean(vf), ls="--", color="b", lw=0.6)
ax.hist(vf, bins=np.arange(0,1,.01), color="b", alpha=0.5, density=True, label=r"$\frac{V_F}{V_T}[\mu=%.2f]$"%np.mean(vf),
histtype="step")
ax.set_ylim(0,50)
#ax.hist(vd, bins=np.arange(0,1,.01), color="r", alpha=0.5, density=True, label=r"$\frac{v_D}{v_T}$", histtype="step")
#ax.hist(ve, bins=np.arange(0,1,.01), color="b", alpha=0.5, density=True, label=r"$\frac{v_E}{v_T}$", histtype="step")
#ax.hist(vf, bins=np.arange(0,1,.01), color="g", alpha=0.5, density=True, label=r"$\frac{v_F}{v_T}$", histtype="step")
ax.text(0.1,0.9, "(b)", horizontalalignment="center", verticalalignment="center", transform=ax.transAxes)
ax.set_xlim(0,1)
ax.legend(loc=1, prop={"size": 8})
ax.set_xlabel(r"$\frac{V_x}{V_T}$")
fig.savefig("data/hist.png", bbox_inches="tight")
return
class FanPlot(object):
""" Plot Fan Dataset """
def __init__(self, nrange=75, nbeam=24, r0=180, dr=45, dtheta=3.24, theta0=None):
"""
Initialize the fanplot do a certain size.
:param nrange: number of range gates
:param nbeam: number of beams
        :param r0: initial beam distance - any distance unit as long as it's consistent with dr
        :param dr: length of each range gate - any distance unit as long as it's consistent with r0
:param dtheta: degrees per beam gate, degrees (default 3.24 degrees)
"""
# Set member variables
self.nrange = int(nrange)
self.nbeam = int(nbeam)
self.r0 = r0
self.dr = dr
self.dtheta = dtheta
# Initial angle (from X, polar coordinates) for beam 0
        if theta0 is None:
self.theta0 = (90 - dtheta * nbeam / 2) # By default, point fanplot towards 90 deg
else:
self.theta0 = theta0
return
def add_axis(self, fig, subplot):
ax = fig.add_subplot(subplot, polar=True)
# Set up ticks and labels
self.r_ticks = range(self.r0, self.r0 + (self.nrange+1) * self.dr, self.dr)
self.theta_ticks = [self.theta0 + self.dtheta * b for b in range(self.nbeam+1)][::4]
rlabels = [""] * len(self.r_ticks)
for i in range(0, len(rlabels), 5):
rlabels[i] = i
plt.rgrids(self.r_ticks, rlabels)
plt.thetagrids(self.theta_ticks, range(self.nbeam+1)[::4])
return ax
def plot(self, ax, beams, gates, color="blue"):
"""
Add some data to the plot in a single color at positions given by "beams" and "gates".
:param beams: a list/array of beams
:param gates: a list/array of gates - same length as beams
:param color: a Matplotlib color
"""
for i, (beam, gate) in enumerate(zip(beams, gates)):
theta = (self.theta0 + beam * self.dtheta) * np.pi / 180 # radians
r = (self.r0 + gate * self.dr) # km
width = self.dtheta * np.pi / 180 # radians
height = self.dr # km
x1, x2 = theta, theta + width
y1, y2 = r, r + height
x = x1, x2, x2, x1
y = y1, y1, y2, y2
ax.fill(x, y, color=color)
self._scale_plot(ax)
return
def _add_colorbar(self, fig, ax, bounds, colormap, label=""):
"""
Add a colorbar to the right of an axis.
Similar to the function in RangeTimePlot, but positioned differently fanplots.
:param fig:
:param ax:
:param bounds:
:param colormap:
:param label:
:return:
"""
import matplotlib as mpl
pos = ax.get_position()
cpos = [pos.x1 + 0.025, pos.y0 + 0.25*pos.height,
0.01, pos.height * 0.5] # this list defines (left, bottom, width, height)
cax = fig.add_axes(cpos)
norm = mpl.colors.BoundaryNorm(bounds[::2], colormap.N)
cb2 = mpl.colorbar.ColorbarBase(cax, cmap=colormap,
norm=norm,
ticks=bounds[::2],
spacing="uniform",
orientation="vertical")
cb2.set_label(label)
# Remove the outer bounds in tick labels
ticks = [str(i) for i in bounds[::2]]
ticks[0], ticks[-1] = "", ""
cb2.ax.set_yticklabels(ticks)
return
def text(self, text, beam, gate, fontsize=8):
theta = (self.theta0 + beam * self.dtheta + 0.8 * self.dtheta) * np.pi / 180
r = (self.r0 + gate * self.dr)
plt.text(theta, r, text, fontsize=fontsize)
return
def save(self, filepath):
plt.tight_layout()
plt.savefig(filepath)
plt.close()
return
def _scale_plot(self, ax):
# Scale min-max
ax.set_thetamin(self.theta_ticks[0])
ax.set_thetamax(self.theta_ticks[-1])
ax.set_rmin(0)
ax.set_rmax(self.r_ticks[-1])
return
def _monotonically_increasing(self, vec):
if len(vec) < 2:
return True
return all(x <= y for x, y in zip(vec[:-1], vec[1:]))
def plot_geo_fov(self, rad, data_dict, scans, name, start, data, skip=1,
vel_max=100, vel_step=10,
save=True, base_filepath=""):
import pydarn
import cartopy
hdw = pydarn.read_hdw_file(rad)
rf = rad_fov.CalcFov(hdw=hdw, ngates=self.nrange, nbeams=self.nbeam)
lons, lats = rf.lonFull, rf.latFull
vel_ranges = list(range(-vel_max, vel_max + 1, vel_step))
vel_ranges.insert(0, -9999)
vel_ranges.append(9999)
        vel_cmap = plt.get_cmap("Spectral")  # diverging map; "viridis" would be red-green colorblind safe
vel_colors = vel_cmap(np.linspace(0, 1, len(vel_ranges)))
for i in scans:
scan_time = start + dt.timedelta(minutes=i)
fig = plt.figure(figsize=(10, 5), dpi=150)
dat_ax = fig.add_subplot(121, projection="fovcarto",coords="geo", rad=rad, plot_date=scan_time)
dat_ax.coastlines()
dat_ax.overlay_radar()
dat_ax.overlay_fov(beamLimits=[7,8], lineColor="darkred", lineWidth=0.5, ls="--")
dat_ax.overlay_fov()
dat_ax.grid_on()
dat_ax.enum(bounds=[(int(np.min(lons)/10)-1)*10, (int(np.max(lons)/10)+1)*10, 25, 70])
vel_ax = fig.add_subplot(122, projection="fovcarto",coords="geo", rad=rad, plot_date=scan_time)
vel_ax.coastlines()
vel_ax.overlay_radar()
vel_ax.overlay_fov(beamLimits=[7,8], lineColor="darkred", lineWidth=0.5, ls="--")
vel_ax.overlay_fov()
vel_ax.grid_on()
vel_ax.enum()
Vx = np.zeros((self.nbeam, self.nrange))*np.nan
idbs, idgs = data_dict["beam"][i], data_dict["gate"][i]
vels = data_dict["vel"][i]
for idb, idg, vel in zip(idbs, idgs, vels):
Vx[idb, np.round(idg).astype(int)] = vel
Vx = np.ma.masked_invalid(Vx)
dat_ax.pcolormesh(lons, lats, Vx, transform=cartopy.crs.PlateCarree(), cmap=plt.get_cmap("Spectral"),
vmax=vel_max, vmin=-vel_max)
dat_ax.text(1.02, 0.15, "Simulation", horizontalalignment="center",
verticalalignment="center", transform=dat_ax.transAxes, fontdict={"color":"red"}, rotation=90)
Vmod = np.copy(Vx)
Vx = np.zeros((self.nbeam, self.nrange))*np.nan
idbs, idgs = data["beam"][i], data["gate"][i]
vels = data["vel"][i]
for idb, idg, vel in zip(idbs, idgs, vels):
idb = np.array(idb)[np.array(idg) < self.nrange]
vel = np.array(vel)[np.array(idg) < self.nrange]
idg = np.array(idg)[np.array(idg) < self.nrange]
if len(vel) > 0: Vx[idb, np.round(idg).astype(int)] = vel
Vx = np.ma.masked_invalid(Vx)
vel_ax.pcolormesh(lons, lats, Vx, transform=cartopy.crs.PlateCarree(), cmap=plt.get_cmap("Spectral"),
vmax=vel_max, vmin=-vel_max)
vel_ax.text(1.02, 0.15, "Observations", horizontalalignment="center",
verticalalignment="center", transform=vel_ax.transAxes, fontdict={"color":"red"}, rotation=90)
vel_ax.enum(bounds=[(int(np.min(lons)/10)-1)*10, (int(np.max(lons)/10)+1)*10, 25, 70])
rmse = np.sqrt(np.ma.sum((Vx-Vmod)**2)/np.ma.count(Vmod))
perror = np.ma.sum(np.abs((Vx-Vmod)/Vmod)/np.ma.count(Vmod)) * 100.
print(rmse, perror)
if rmse>0:vel_ax.text(0.3, 0.2, r"RMdSE: %.2f $ms^{-1}$"%rmse + "\n" + r"$\delta: %.2f$"%perror + "%", horizontalalignment="center",
verticalalignment="center", transform=vel_ax.transAxes, fontdict={"color":"red"})
self._add_colorbar(fig, vel_ax, vel_ranges, vel_cmap, label="Velocity [m/s]")
if save:
filepath = "%s/geo_%s.png" % (base_filepath, "%02d"%i)
fig.savefig(filepath)
fig.clf()
plt.close()
return
def plot_fov(self, data_dict, scans, name, start, data, skip=1,
vel_max=100, vel_step=10,
save=True, base_filepath=""):
vel_ranges = list(range(-vel_max, vel_max + 1, vel_step))
vel_ranges.insert(0, -9999)
vel_ranges.append(9999)
        vel_cmap = plt.cm.jet_r  # legacy map; "viridis" would be red-green colorblind safe
vel_colors = vel_cmap(np.linspace(0, 1, len(vel_ranges)))
for i in scans:
fig = plt.figure(figsize=(8,4), dpi=120)
vel_ax = self.add_axis(fig, 122)
dat_ax = self.add_axis(fig, 121)
vels = data_dict["vel"][i]
beams = data_dict["beam"][i]
gates = data_dict["gate"][i]
print("----------", i, skip, int(i/skip))
d_vels = data["vel"][int(i/skip)]
d_beams = data["beam"][int(i/skip)]
d_gates = data["gate"][int(i/skip)]
for k, (beam, gate, vel) in enumerate(zip(beams, gates, vels)):
beam, gate, vel = np.array([beam]), np.array([gate]), np.array([vel])
for s in range(len(vel_ranges) - 1):
step_mask = (vel >= vel_ranges[s]) & (vel <= vel_ranges[s + 1])
beam_s = beam[step_mask]
gate_s = gate[step_mask]
self.plot(vel_ax, beam_s, gate_s, vel_colors[s])
# Add data
for k, (vel, beam, gate) in enumerate(zip(d_vels, d_beams, d_gates)):
beam, gate, vel = np.array([beam]), np.array([gate]), np.array([vel])
for s in range(len(vel_ranges) - 1):
step_mask = (vel >= vel_ranges[s]) & (vel <= vel_ranges[s + 1])
beam_s = beam[step_mask]
gate_s = gate[step_mask]
self.plot(dat_ax, beam_s, gate_s, vel_colors[s])
self._add_colorbar(fig, vel_ax, vel_ranges, vel_cmap, label="Velocity [m/s]")
scan_time = start + dt.timedelta(minutes=i)
plt.suptitle("%s \n Scan time %s UT \n Velocity" % (name, scan_time))
if save:
filepath = "%s_%s.png" % (base_filepath, "%02d"%i)
self.save(filepath)
fig.clf()
plt.close()
return
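# --- Hedged usage sketch (not part of the original module) ---
# Paints a few beam/gate cells on the polar fan defined by FanPlot above. The
# beam/gate values and filename are illustrative assumptions; nothing here
# runs on import.
def _demo_fanplot(fname="demo_fan.png"):
    fan = FanPlot(nrange=75, nbeam=24)
    fig = plt.figure(figsize=(4, 4), dpi=120)
    ax = fan.add_axis(fig, 111)
    fan.plot(ax, beams=[2, 5, 5, 11], gates=[10, 20, 21, 40], color="red")
    fan.save(fname)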
def plot_velocity_ts_beam(dn, rad, bmnum, model, start, end):
""" Plot velocity TS data """
fig = plt.figure(figsize=(5,6), dpi=150)
axs = [fig.add_subplot(311), fig.add_subplot(312), fig.add_subplot(313)]
mkeys = ["vd", "vf", "vt"]
fmt = matplotlib.dates.DateFormatter("%H:%M")
dic = "data/op/{dn}/{model}/{rad}/bm.{bm}/".format(dn=dn.strftime("%Y.%m.%d.%H.%M"),
rad=rad, model=model, bm="%02d"%bmnum)
fstr = glob.glob(dic + "/velocity_ti*mat")
fstr.sort()
#axs[0].set_title("%s UT, Radar - %s, Beam - %d, Model - %s"%(dn.strftime("%Y.%m.%d.%H.%M"), rad, bmnum, model))
#axs[0].set_title("%s UT, Radar - %s, Beam - %d"%(dn.strftime("%Y.%m.%d.%H.%M"), rad, bmnum))
axs[0].text(0.98, 1.05, r"Date: %s UT"%dn.strftime("%Y-%m-%d %H:%M"), horizontalalignment="right", verticalalignment="center",
transform=axs[0].transAxes)
axs[0].text(0.02, 1.05, "Rad: %s, Beam: %02d"%(rad, bmnum), horizontalalignment="left", verticalalignment="center",
transform=axs[0].transAxes)
cols = ["r", "b", "k"]
labs = [r"$V_{d\eta}$", r"$V_{dh}$", r"$V_{T}$"]
fname = "data/op/{dn}/{model}/sd_{rad}_data.csv.gz".format(dn=dn.strftime("%Y.%m.%d.%H.%M"), rad=rad, model=model)
dat = utils.get_sd_data(fname, bmnum).dropna()
mean, std = dat.groupby("time").mean().reset_index(), dat.groupby("time").std().reset_index()
I = 0
for ax, mkey, col, lab in zip(axs, mkeys, cols, labs):
ax.set_ylabel(r"Velocity, $ms^{-1}$")
ax.set_xlabel("Time, UT")
ax.xaxis.set_major_formatter(fmt)
v, vmax, vmin, vstd, time = [], [], [], [], []
for i, f in enumerate(fstr):
sdat = loadmat(f)
if mkey == "vt":
v.append(np.median(sdat["vd"]+sdat["vf"]))
vmax.append((sdat["vd"]+sdat["vf"]).max())
vmin.append((sdat["vd"]+sdat["vf"]).min())
vstd.append(1.96*np.std(sdat["vd"]+sdat["vf"]))
else:
v.append(np.median(sdat[mkey]))
vmax.append(sdat[mkey].max())
vmin.append(sdat[mkey].min())
vstd.append(1.96*np.std(sdat[mkey]))
time.append(start + dt.timedelta(minutes=i))
yerr = np.array([(mn, mx) for mn, mx in zip(vmin, vmax)]).T
ax.errorbar(time, v, yerr=vstd,
mec=col, mfc=col, fmt="r^", ms=1.5, ls="None", ecolor=col,
capsize=1, capthick=.4, elinewidth=0.4,
alpha=0.5, label=lab)
if I == 2:
ax.errorbar(mean.time, mean.v, yerr=std.v, mec="r", mfc="r", fmt="o",
ms=1.5, ls="None", ecolor="r",
capsize=1, capthick=.4, elinewidth=0.4,alpha=0.5,
label=r"$V_{sd}^{los}$")
if len(mean.v) > 50:
from scipy import signal
vmx = signal.resample(mean.v, len(v))
rmse = np.sqrt(np.median((vmx - np.array(v))**2))
perror = np.mean(np.abs((vmx - np.array(v))/np.array(v)))
ax.text(0.2, 0.85, r"RMdSE: %.2f $ms^{-1}$"%rmse + "\n" + r"$\delta: %.2f$"%perror+"%", ha="center", va="center",
transform=ax.transAxes, fontdict={"color":"red", "size":8})
print(rmse, perror)
ax.axhline(0, color="gray", ls="--", lw=0.6)
ax.legend(loc=1)
ax.set_ylim(-100, 200)
ax.set_xlim(start, end)
I += 1
fname = "data/op/{dn}/{model}/{rad}/bm{bm}.png".format(dn=dn.strftime("%Y.%m.%d.%H.%M"), rad=rad, model=model, bm="%02d"%bmnum)
fig.autofmt_xdate()
fig.savefig(fname,bbox_inches="tight")
return
class SensitivityAnalysis(object):
""" Sensitivity Analysis """
def __init__(self, problem, ds):
""" Initialize parameters """
self.problem = problem
self.ds = ds
return
def _hist_(self):
""" Histogram of outputs """
fig, ax = plt.subplots(figsize=(9,3), nrows=1, ncols=3, sharey=True)
labels = [r"$V_{d\eta}$ [m/s]", r"$V_{dh}$ [m/s]", r"$V_{t}$ [m/s]"]
params = ["vd_mean", "vf_mean", "vt_mean"]
for i, lab, pm in zip(range(3), labels, params):
ax[i].hist(self.ds.variables[pm][:].ravel(), 20)
ax[i].set_xlabel(lab)
ax[0].set_ylabel("Counts")
fig.subplots_adjust(wspace=0.1)
fig.savefig("data/sim/histogram.png", bbox_inches="tight")
return
def _regression_(self):
""" Regression Analysis """
import scipy
import seaborn as sns
ylabels = [r"$V_{d\eta}$ [m/s]", r"$V_{dh}$ [m/s]", r"$V_{t}$ [m/s]"]
        xlabels = [r"$Ratio_{D}$", r"$Ratio_{E}$", r"$Ratio_{F}$"]
yparam = ["vd_mean", "vf_mean", "vt_mean"]
xparam = ["d_ratio", "e_ratio", "f_ratio"]
print(self.ds.variables["parameters"][:].shape)
for i, ylab, yp in zip(range(3), ylabels, yparam):
fig, ax = plt.subplots(1, 3, sharey=True)
y = self.ds.variables[yp][:].ravel()
for j, xlab, xp, a in zip(range(3), xlabels, xparam, ax):
x = self.ds.variables["parameters"][:][:,j]
sns.regplot(x, y, ax=a, ci=None, color="k",scatter_kws={"alpha":0.2, "s":4, "color":"gray"})
pearson = scipy.stats.pearsonr(x, y)
a.annotate("r: {:6.3f}".format(pearson[0]), xy=(0.15, 0.85), xycoords="axes fraction",fontsize=13)
a.set_xlabel(xlab)
if j==0: a.set_ylabel(ylab)
fig.savefig("data/sim/reg_{pm}.png".format(pm=yp), bbox_inches="tight")
plt.close()
return
def _normalize_(self, x, xmin, xmax):
return (x-xmin)/(xmax-xmin)
def _plot_circles_(self, ax, locs, names, max_s, stats, smax, smin, fc, ec, lw, zorder):
s = np.asarray([stats[name] for name in names])
s = 0.01 + max_s * np.sqrt(self._normalize_(s, smin, smax))
fill = True
for loc, name, si in zip(locs, names, s):
if fc=="w": fill=False
else: ec="none"
x = np.cos(loc)
y = np.sin(loc)
            circle = plt.Circle((x,y), radius=si, ec=ec, fc=fc, transform=ax.transData._b,
                    zorder=zorder, lw=lw, fill=fill)
ax.add_artist(circle)
return
def _filter_(self, sobol_indices, names, locs, criterion, threshold):
if criterion in ["ST", "S1", "S2"]:
data = sobol_indices[criterion]
data = np.abs(data)
data = data.flatten() # flatten in case of S2
# TODO: remove NaNs
filtered = ([(name, locs[i]) for i, name in enumerate(names) if
data[i]>threshold])
filtered_names, filtered_locs = zip(*filtered)
elif criterion in ["ST_conf", "S1_conf", "S2_conf"]: raise NotImplementedError
else: raise ValueError("unknown value for criterion")
return filtered_names, filtered_locs
def _legend_(self, ax):
some_identifiers = [plt.Circle((0,0), radius=5, color="k", fill=False, lw=1),
plt.Circle((0,0), radius=5, color="k", fill=True),
plt.Line2D([0,0.5], [0,0.5], lw=8, color="darkgray")]
ax.legend(some_identifiers, ["ST", "S1", "S2"],
loc=(1,0.75), borderaxespad=0.1, mode="expand",
handler_map={plt.Circle: HandlerCircle()})
return
def _plot_sobol_indices_(self, sobol_indices, criterion="ST", threshold=0.01):
max_linewidth_s2 = 15  # 25*1.8
max_s_radius = 0.3
sobol_stats = {key:sobol_indices[key] for key in ["ST", "S1"]}
sobol_stats = pd.DataFrame(sobol_stats, index=self.problem["names"])
smax = sobol_stats.max().max()
smin = sobol_stats.min().min()
s2 = pd.DataFrame(sobol_indices["S2"], index=self.problem["names"],
columns=self.problem["names"])
s2[s2<0.0]=0. #Set negative values to 0 (artifact from small sample sizes)
s2max = s2.max().max()
s2min = s2.min().min()
names = self.problem["names"]
n = len(names)
ticklocs = np.linspace(0, 2*pi, n+1)
locs = ticklocs[0:-1]
filtered_names, filtered_locs = self._filter_(sobol_indices, names, locs,
criterion, threshold)
# setup figure
xnames = copy.copy(names)
xnames.extend([names[0]])  # repeat the first label to close the polar axis
fig = plt.figure()
ax = fig.add_subplot(111, polar=True)
ax.grid(False)
ax.spines["polar"].set_visible(False)
ax.set_xticks(ticklocs)
ax.set_xticklabels(xnames)
ax.set_yticklabels([])
ax.set_ylim(top=1.4)
self._legend_(ax)
# plot ST
self._plot_circles_(ax, filtered_locs, filtered_names, max_s_radius,
sobol_stats["ST"], smax, smin, "w", "k", 1, 9)
# plot S1
self._plot_circles_(ax, filtered_locs, filtered_names, max_s_radius,
sobol_stats["S1"], smax, smin, "k", "k", 1, 10)
# plot S2
for name1, name2 in itertools.combinations(zip(filtered_names, filtered_locs), 2):
name1, loc1 = name1
name2, loc2 = name2
weight = s2.loc[name1, name2]
lw = 0.5+max_linewidth_s2*self._normalize_(weight, s2min, s2max)
ax.plot([loc1, loc2], [1,1], c="darkgray", lw=lw, zorder=1)
return fig
def analyze(self, regs=False):
""" Analyze and plots sensitivity test results """
self._hist_()
if regs: print("None")#self._regression_()
else:
labels = [r"$V_{d\eta}$ [m/s]", r"$V_{dh}$ [m/s]", r"$V_{t}$ [m/s]"]
params = ["vd_mean", "vf_mean", "vt_mean"]
for i, lab, pm in zip(range(3), labels, params):
Si = sobol.analyze(self.problem, self.ds.variables[pm][:].ravel(), calc_second_order=True, print_to_console=False)
Si_filter = {k:Si[k] for k in ["ST","ST_conf","S1","S1_conf"]}
Si_df = pd.DataFrame(Si_filter, index=self.problem["names"])
fig, ax = plt.subplots(1)
indices = Si_df[["S1","ST"]]
err = Si_df[["S1_conf","ST_conf"]]
indices.plot.bar(yerr=err.values.T,ax=ax)
fig.set_size_inches(4,4)
fig.savefig("data/sim/sens_{pm}.png".format(pm=pm), bbox_inches="tight")
plt.close()
fig = self._plot_sobol_indices_(Si, criterion="ST", threshold=0.005)
fig.set_size_inches(4,4)
fig.savefig("data/sim/intv_{pm}.png".format(pm=pm), bbox_inches="tight")
plt.close()
return
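# A minimal, self-contained sketch of the SALib round trip that the two
# classes in this module assume (problem dict -> Saltelli sampling ->
# Sobol analysis). The quadratic model below is a stand-in, not part of
# this module; `saltelli.sample` is the SALib sampler that pairs with the
# `sobol.analyze` call used above.
def _sobol_demo(n_samples=1024):
    from SALib.sample import saltelli
    problem = {
        "num_vars": 3,
        "names": ["D-Ratio", "E-Ratio", "F-Ratio"],
        "bounds": [[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
    }
    X = saltelli.sample(problem, n_samples, calc_second_order=True)
    Y = np.sum(X ** 2, axis=1)  # toy model output
    return sobol.analyze(problem, Y, calc_second_order=True,
                         print_to_console=False)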
class ModelSensitivity(object):
""" Sensitivity Analysis """
def __init__(self, ds):
""" Initialize parameters """
self.problem = {
"num_vars": 3,
"names": ["D-Ratio", "E-Ratio", "F-Ratio"],
"bounds": [[np.min(ds.variables["d_ratio"][:]), np.max(ds.variables["d_ratio"][:])],
[np.min(ds.variables["e_ratio"][:]), np.max(ds.variables["e_ratio"][:])],
[np.min(ds.variables["f_ratio"][:]), np.min(ds.variables["f_ratio"][:])]]
}
self.ds = ds
print(ds.variables.keys())
return
def _hist_(self):
""" Histogram of outputs """
fig, ax = plt.subplots(figsize=(9,9), nrows=3, ncols=3, sharey=True, sharex=False)
labels = [[r"$V_{d\eta}$ [m/s]", r"$V_{dh}$ [m/s]", r"$V_{t}$ [m/s]"],
[r"$V_{d\eta}^{max}$ [m/s]", r"$V_{dh}^{max}$ [m/s]", r"$V_{t}^{max}$ [m/s]"],
[r"$V_{d\eta}^{min}$ [m/s]", r"$V_{dh}^{min}$ [m/s]", r"$V_{t}^{min}$ [m/s]"]]
params = [["vn", "vh", "vt"], ["vn_max", "vh_max", "vt_max"], ["vn_min", "vh_min", "vt_min"]]
bins = range(-10,110,4)
nx = np.arange(-20,160)
for i, labs, pms in zip(range(3), labels, params):
for j, lab, pm in zip(range(3), labs, pms):
u,loc,scale = skewnorm.fit(3*self.ds.variables[pm][:].ravel(),
floc=np.mean(3*self.ds.variables[pm][:].ravel()),
fscale=np.std(3*self.ds.variables[pm][:].ravel()))
ax[i,j].hist(3*self.ds.variables[pm][:].ravel(), bins=bins)
am = ax[i,j].twinx()
am.set_yticklabels([])
am.plot(nx, skewnorm.pdf(nx, a=u, loc=loc, scale=scale), "r", lw=1.5)
ax[i,j].set_xlabel(lab)
ax[i,j].set_xlim(-20, 160)
ax[i,0].set_ylabel("Counts")
fig.subplots_adjust(wspace=0.1, hspace=0.3)
fig.savefig("data/sim/histogram.png", bbox_inches="tight")
return
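# A minimal sketch of the skew-normal fit used in _hist_ above, assuming
# `skewnorm` is scipy.stats.skewnorm imported elsewhere in this module:
# fit() returns (shape a, loc, scale); pinning floc/fscale to the sample
# moments, as above, leaves only the skewness parameter free.
def _fit_skewnorm_(self, sample):
    a, loc, scale = skewnorm.fit(sample, floc=np.mean(sample),
                                 fscale=np.std(sample))
    return a, loc, scale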
def _regression_(self):
""" Regression Analysis """
import scipy
import seaborn as sns
ylabels = [r"$V_{d\eta}$ [m/s]", r"$V_{dh}$ [m/s]", r"$V_{t}$ [m/s]"]
xlabels = [r"$Ratio_{D}$", r"$Ratio_{E}$", r"$Ratio_{F}$", r"$Rate_{D}$", r"$Rate_{E}$", r"$Rate_{F}$", r"$Frequency$", "SZA"]
xlabels = [r"$R_{D}$", r"$R_{E}$", r"$R_{F}$", r"$R^r_{D}$", r"$R^r_{E}$", r"$R^r_{F}$", r"$Frequency$", "SZA"]
yparam = ["vn", "vh", "vt"]
xparam = ["d_ratio", "e_ratio", "f_ratio", "d_rate", "e_rate", "f_rate", "frequency", "sza"]
token = ["(a)", "(b)", "(c)", "(d)", "(e)", "(f)", "(g)", "(h)"]
Xx = np.array([self.ds.variables["d_ratio"][:], self.ds.variables["e_ratio"][:], self.ds.variables["f_ratio"][:],
self.ds.variables["d_rate"][:], self.ds.variables["e_rate"][:], self.ds.variables["f_rate"][:],
self.ds.variables["frequency"][:]/1000, self.ds.variables["sza"][:]]).T
for i, ylab, yp in zip(range(3), ylabels, yparam):
fig, ax = plt.subplots(2, 4, sharey=True, figsize=(10,5))
y = 3*self.ds.variables[yp][:].ravel()
minfo = MIR(Xx,y)
for j, xlab, xp in zip(range(8), xlabels, xparam):
a = ax[np.mod(j,2), int(j/2)]
x = Xx[:,j]#self.ds.variables["parameters"][:][:,j]
#print(te.te_compute(x, np.array(y), k=1, embedding=1, safetyCheck=False, GPU=False))
sns.regplot(x, y, ax=a, ci=95, color="k",scatter_kws={"alpha":0.2, "s":1.5, "color":"red"})
pearson = scipy.stats.pearsonr(x, y)
a.annotate("r: {:1.2f}, MI: {:1.2f}".format(pearson[0], minfo[j]),
xy=(0.15, 0.85), xycoords="axes fraction",fontsize=10)
a.set_xlabel(xlab)
a.text(0.9, 0.9, token[j], horizontalalignment="center", verticalalignment="center",
transform=a.transAxes)
if j==0: a.set_ylabel(ylab)
fig.subplots_adjust(wspace=0.1, hspace=0.5)
fig.savefig("data/sim/reg_{pm}.png".format(pm=yp), bbox_inches="tight")
plt.close()
return
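# MIR above is assumed to be sklearn.feature_selection.mutual_info_regression
# aliased at import time; a minimal equivalent call for reference:
def _mutual_info_(self, X, y):
    from sklearn.feature_selection import mutual_info_regression
    # one nonnegative MI estimate per column (feature) of X
    return mutual_info_regression(X, y)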
def _normalize_(self, x, xmin, xmax):
return (x-xmin)/(xmax-xmin)
def _plot_circles_(self, ax, locs, names, max_s, stats, smax, smin, fc, ec, lw, zorder):
s = np.asarray([stats[name] for name in names])
s = 0.01 + max_s * np.sqrt(self._normalize_(s, smin, smax))
fill = True
for loc, name, si in zip(locs, names, s):
    if fc=="w": fill=False
    else: ec="none"
    x = np.cos(loc)
    y = np.sin(loc)
    circle = plt.Circle((x,y), radius=si, ec=ec, fc=fc, transform=ax.transData._b,
                        zorder=zorder, lw=lw, fill=fill)
ax.add_artist(circle)
return
def _filter_(self, sobol_indices, names, locs, criterion, threshold):
if criterion in ["ST", "S1", "S2"]:
data = sobol_indices[criterion]
data = np.abs(data)
data = data.flatten() # flatten in case of S2
# TODO: remove NaNs
filtered = ([(name, locs[i]) for i, name in enumerate(names) if
data[i]>threshold])
filtered_names, filtered_locs = zip(*filtered)
elif criterion in ["ST_conf", "S1_conf", "S2_conf"]: raise NotImplementedError
else: raise ValueError("unknown value for criterion")
return filtered_names, filtered_locs
def _legend_(self, ax):
some_identifiers = [plt.Circle((0,0), radius=5, color="k", fill=False, lw=1),
plt.Circle((0,0), radius=5, color="k", fill=True),
plt.Line2D([0,0.5], [0,0.5], lw=8, color="darkgray")]
ax.legend(some_identifiers, ["ST", "S1", "S2"],
loc=(1,0.75), borderaxespad=0.1, mode="expand",
handler_map={plt.Circle: HandlerCircle()})
return
def _plot_sobol_indices_(self, sobol_indices, criterion="ST", threshold=0.01):
max_linewidth_s2 = 15  # 25*1.8
max_s_radius = 0.3
sobol_stats = {key:sobol_indices[key] for key in ["ST", "S1"]}
sobol_stats = pd.DataFrame(sobol_stats, index=self.problem["names"])
smax = sobol_stats.max().max()
smin = sobol_stats.min().min()
s2 = pd.DataFrame(sobol_indices["S2"], index=self.problem["names"],
columns=self.problem["names"])
s2[s2<0.0]=0. #Set negative values to 0 (artifact from small sample sizes)
s2max = s2.max().max()
s2min = s2.min().min()
names = self.problem["names"]
n = len(names)
ticklocs = np.linspace(0, 2*pi, n+1)
locs = ticklocs[0:-1]
filtered_names, filtered_locs = self._filter_(sobol_indices, names, locs,
criterion, threshold)
# setup figure
xnames = copy.copy(names)
xnames.extend([names[0]])  # repeat the first label to close the polar axis
fig = plt.figure()
ax = fig.add_subplot(111, polar=True)
ax.grid(False)
ax.spines["polar"].set_visible(False)
ax.set_xticks(ticklocs)
ax.set_xticklabels(xnames)
ax.set_yticklabels([])
ax.set_ylim(top=1.4)
self._legend_(ax)
# plot ST
self._plot_circles_(ax, filtered_locs, filtered_names, max_s_radius,
sobol_stats["ST"], smax, smin, "w", "k", 1, 9)
# plot S1
self._plot_circles_(ax, filtered_locs, filtered_names, max_s_radius,
sobol_stats["S1"], smax, smin, "k", "k", 1, 10)
# plot S2
for name1, name2 in itertools.combinations(zip(filtered_names, filtered_locs), 2):
name1, loc1 = name1
name2, loc2 = name2
weight = s2.loc[name1, name2]
lw = 0.5+max_linewidth_s2*self._normalize_(weight, s2min, s2max)
ax.plot([loc1, loc2], [1,1], c="darkgray", lw=lw, zorder=1)
return fig
def analyze(self, regs=True):
""" Analyze and plots sensitivity test results """
self._hist_()
if not regs:
print("None")
self._regression_()
else:
print(regs)
labels = [r"$V_{d\eta}$ [m/s]", r"$V_{dh}$ [m/s]", r"$V_{t}$ [m/s]"]
params = ["vn", "vh", "vt"]
for i, lab, pm in zip(range(3), labels, params):
v = self.ds.variables[pm][:].ravel()
x = np.array([self.ds.variables["d_rate"][:], self.ds.variables["e_rate"][:], self.ds.variables["f_rate"][:]]).T
print(v.shape, x.shape)
#Si = rbd_fast.analyze(self.problem, x, v, M=10, print_to_console=False)
#print(Si)
Si = sobol.analyze(self.problem, self.ds.variables[pm][:].ravel(), calc_second_order=True, print_to_console=False)
#print(Si)
Si_filter = {k:Si[k] for k in ["ST","ST_conf","S1","S1_conf"]}
Si_df = pd.DataFrame(Si_filter, index=self.problem["names"])
fig, ax = plt.subplots(1)
indices = Si_df[["S1","ST"]]
err = Si_df[["S1_conf","ST_conf"]]
indices.plot.bar(yerr=err.values.T,ax=ax)
fig.set_size_inches(4,4)
fig.savefig("data/sim/sens_{pm}.png".format(pm=pm), bbox_inches="tight")
plt.close()
#fig = self._plot_sobol_indices_(Si, criterion="ST", threshold=0.005)
#fig.set_size_inches(4,4)
#fig.savefig("data/sim/intv_{pm}.png".format(pm=pm), bbox_inches="tight")
#plt.close()
return
def plot_edens_versus_height(eDensPC, eDensAC, ylim=[50,350]):
fig, axes = plt.subplots(figsize=(15,6), nrows=2, ncols=5, sharey=True, sharex=False)
from scipy import stats
for i in range(5):
x, y = np.array(eDensPC[i+16]), np.array(eDensAC[i+16])
xmean, ymean = np.quantile(x, q=.56, axis=0), np.quantile(y, q=.56, axis=0) #np.median(x, axis=0), np.median(y, axis=0)
xstd, ystd = 0.3*stats.median_absolute_deviation(x, axis=0), 0.3*stats.median_absolute_deviation(y, axis=0)
xl, xu = utils.smooth(np.quantile(x, q=.5, axis=0), window_len=51),\
utils.smooth(np.quantile(x, q=.62, axis=0), window_len=51)
yl, yu = utils.smooth(np.quantile(y, q=.5, axis=0), window_len=51),\
utils.smooth(np.quantile(y, q=.62, axis=0), window_len=51)
xmean, ymean = utils.smooth(xmean, window_len=51), utils.smooth(ymean, window_len=51)
ax = axes[0, i]
ax.semilogx(xmean, np.arange(50,350,1).ravel(), "ro", lw=0.8, markersize=1)
ax.fill_betweenx(np.arange(50,350,1).ravel(), x1=xl, x2=xu, alpha=0.3, color="r")
ax.set_xlim(.01, 10)
if i==0: ax.set_ylabel("Height, km")
ax.set_xlabel("Percentage Change")
ax = axes[1, i]
ax.semilogx(ymean, np.arange(50,350,1).ravel(), "ro", lw=0.8, markersize=1)
ax.fill_betweenx(np.arange(50,350,1).ravel(), x1=yl, x2=yu, alpha=0.3, color="r")
ax.set_xlim(.1, 10000)
if i==0: ax.set_ylabel("Height, km")
ax.set_xlabel("Absolute Change")
fig.subplots_adjust(hspace=0.3)
fig.savefig("data/sens.png", bbox_inches="tight")
return
def plot_ssi_versus_bins(irr, wavelength, ylim=[50,350]):
fig, ax = plt.subplots(figsize=(4,4), nrows=1, ncols=1, sharey=True, sharex=False)
xmean = np.mean(irr, axis=0)#np.quantile(irr, q=.56, axis=0)
std = np.std(irr, axis=0)
print(xmean.shape)
ax.loglog(wavelength, xmean, "ro", lw=0.8, markersize=1)
ax.errorbar(wavelength, xmean, yerr=std, capthick=1, elinewidth=0.8, capsize=1, ecolor="r", marker="o", ls="None", ms=1, mfc="k", mec="k")
ax.set_ylim(1e5,1e12)
ax.set_xlabel(r"$\Lambda$ (A)")
ax.set_ylabel(r"$I_{o}$ ($Wm^{-2}$)")
fig.savefig("data/sens.b.png", bbox_inches="tight")
return
def plot_ray_edens(ev=dt.datetime(2015,5,5,21,51), rad="bks", time=18, maxground=1500, maxalt=300, step=1,
showrefract=True, nr_cmap="jet_r", nr_lim=[-0.5, 0.5],
raycolor="0.3", title=True, zorder=2, alpha=1,
fig=None, rect=111, ax=None, aax=None, freq=12., diff=True):
ax, aax = curvedEarthAxes(fig=fig, rect=rect, maxground=maxground, maxalt=maxalt, nyticks=3)
dic = "data/op/2015.05.05.22.11/waccmx/bks/bm.07/"
files = glob.glob(dic + "ti({ti})_elv(*)_{case}.csv".format(ti="%02d"%time, case="f"))
files.sort()
Re = 6371.
fx = []
ry = 5
print("I'm here")
for fi in files[::ry]:
    th, r, f, _, _ = get_polar(pd.read_csv(fi))
    fx.append(trapz(signal.resample(f, INT_F)))
    v = (0.5 * f * 3e8 / (freq * 1e6))
if not showrefract: aax.plot(th, r, c=raycolor, zorder=zorder, alpha=alpha)
else:
points = np.array([th, r]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lcol = LineCollection( segments, zorder=zorder, alpha=alpha)
_ = lcol.set_cmap( nr_cmap )
_ = lcol.set_norm( plt.Normalize(*nr_lim) )
_ = lcol.set_array( utils.smooth(v, window_len=21) )
_ = aax.add_collection( lcol )
dx = 0.1
aax.plot(np.arange(0,2000,dx)/Re, np.ones(int(2000*1/dx))*60+Re, color="b", lw=1.2, alpha=0.7)
aax.plot(np.arange(0,2000,dx)/Re, np.ones(int(2000*1/dx))*95+Re, color="orange", lw=1.2, alpha=0.7)
aax.plot(np.arange(0,2000,dx)/Re, np.ones(int(2000*1/dx))*130+Re, color="r", lw=1.2, alpha=0.7)
if showrefract:
cbax = addColorbar(lcol, ax)
_ = cbax.set_ylabel(r"$\Delta V_{d\eta}(t_i), ms^{-1}$", size=8)
dv = (0.5 * np.array(fx) * 3e8 / (freq * 1e6))
ax.text(0.99, 1.05, r"$V_{d\eta}(t_i)=%.2f$ $ms^{-1}$"%np.median(dv), horizontalalignment="right", verticalalignment="center",
transform=ax.transAxes, fontdict={"size":8})
#ax.text(0.01, 1.1, "Rad: bks, Beam: 07\nDate, $t_i$: %s UT"%(ev+dt.timedelta(minutes=time)).strftime("%Y-%m-%d %H:%M")
# , horizontalalignment="left", verticalalignment="center",
# transform=ax.transAxes, fontdict={"size":8})
ax.set_xlabel("Ground Range, $km$", fontdict={"size":8})
ax.set_ylabel("Height, $km$", fontdict={"size":8})
else: cbax = None
fig = ax.get_figure()
fig.savefig("data/figs/rt.dvn.ti({ti}).{case}.png".format(ti="%02d"%time, case="f"), bbox_inches="tight")
plt.close()
ax, aax = curvedEarthAxes(fig=None, rect=rect, maxground=maxground, maxalt=maxalt, nyticks=3)
dfiles = glob.glob(dic + "ti({ti})_elv(*)_{case}.csv".format(ti="%02d"%(time-1), case="f"))
dfiles.sort()
from scipy.io import loadmat
vel = loadmat(dic+"velocity_ti({ti}).mat".format(ti="%02d"%time))["vf"]
kx = 0
for fi, dfi in zip(files[::ry], dfiles[::ry]):
if diff:
if kx < 1:
th, r, f, _, _ = get_polar(pd.read_csv(fi))
dth, dr, df, _, _ = get_polar(pd.read_csv(dfi))
dh = -(np.max(dr) - np.max(r))
ax.text(0.95, 1.02, r"$\Delta h=%.2f$ $km$"%dh, horizontalalignment="right", verticalalignment="center",
transform=ax.transAxes, fontdict={"size":8})
aax.plot(th, r, c=raycolor, zorder=zorder, alpha=alpha, lw=0.8)
aax.plot(dth, dr, c="b", zorder=zorder, alpha=alpha, ls="--",lw=1.6)
#aax.plot(np.arange(0,2000,dx)/Re, np.ones(int(2000*1/dx))*np.max(dr), color="b", ls="--", lw=0.6, alpha=0.7)
#aax.plot(np.arange(0,2000,dx)/Re, np.ones(int(2000*1/dx))*np.max(r), color="r", ls="--", lw=0.6, alpha=0.7)
axins = ax.inset_axes([0.4, -.8, 0.3, 0.5])
axins.plot(np.arange(0,2000,dx)/Re, np.ones(int(2000*1/dx))*np.max(dr), color="b", ls="--", lw=0.6, alpha=0.7)
axins.plot(np.arange(0,2000,dx)/Re, np.ones(int(2000*1/dx))*np.max(r), color="k", ls="--", lw=0.6, alpha=0.7)
axins.plot(th, r, c=raycolor, zorder=zorder, alpha=alpha, lw=0.8)
axins.plot(dth, dr, c="b", zorder=zorder, alpha=alpha, ls="--",lw=1.6)
axins.set_ylim(int(np.max(dr)-10), int(np.max(r)+10))
lenx = np.argmax(r)
axins.set_xlim(th[lenx-10], th[lenx+10])
axins.set_yticks(np.linspace(int(np.max(dr)-10), int(np.max(r)+10), 3))
axins.set_yticklabels((np.linspace(int(np.max(dr)-10), int(np.max(r)+10), 3)-Re).astype(int), fontdict={"size":7})
axins.set_xticks(np.linspace(th[lenx-10], th[lenx+5], 4))
axins.set_xticklabels((np.linspace(th[lenx-10], th[lenx+10], 4)*Re).astype(int), fontdict={"size":7})
axins.set_xlabel("Ground Range, $km$", fontdict={"size":8})
axins.set_ylabel("Height, $km$", fontdict={"size":8})
aax.indicate_inset_zoom(axins)
else:
th, r, f, _, _ = get_polar(pd.read_csv(fi))
dth, dr, df, _, _ = get_polar(pd.read_csv(dfi))
aax.plot(th, r, c=raycolor, zorder=zorder, alpha=alpha, lw=0.8)
kx += 1
aax.plot(np.arange(0,2000,dx)/Re, np.ones(int(2000*1/dx))*60+Re, color="b", lw=1.2, alpha=0.7)
aax.plot(np.arange(0,2000,dx)/Re, np.ones(int(2000*1/dx))*95+Re, color="orange", lw=1.2, alpha=0.7)
aax.plot(np.arange(0,2000,dx)/Re, np.ones(int(2000*1/dx))*130+Re, color="r", lw=1.2, alpha=0.7)
#ax.text(0.02, 1.05, "Rad: bks, Beam: 07\nDate, $t_i$: %s UT"%(ev+dt.timedelta(minutes=time)).strftime("%Y-%m-%d %H:%M")
# , horizontalalignment="left", verticalalignment="center",
# transform=ax.transAxes)
if diff:
ax.text(0.01, 1.05, r"$V_{dh}(t_i)=%.2f$ $ms^{-1}$"%np.median(vel), horizontalalignment="left", verticalalignment="center",
transform=ax.transAxes, fontdict={"size":8})
else:
ax.text(0.99, 1.05, r"$V_{dh}(t_i)=%.2f$ $ms^{-1}$"%np.median(vel), horizontalalignment="right", verticalalignment="center",
transform=ax.transAxes, fontdict={"size":8})
fig = ax.get_figure()
fig.savefig("data/figs/rt.dvh.ti({ti}).{case}.png".format(ti="%02d"%time, case="f"), bbox_inches="tight")
plt.close()
ax, aax = curvedEarthAxes(fig=None, rect=rect, maxground=maxground, maxalt=maxalt, nyticks=3)
ne = loadmat(dic+"ne_ti({ti})_f.mat".format(ti="%02d"%time))["ne"]
ne_bgc = loadmat(dic+"ne_ti({ti})_f.mat".format(ti="%02d"%(time-1)))["ne"]
if diff: ne = np.abs(ne - ne_bgc)
x, y = np.meshgrid(np.arange(0,2000,10)/Re, np.arange(50, 350)+Re)
if diff: mp = aax.pcolor(x, y, ne, cmap="plasma", norm = matplotlib.colors.LogNorm(vmin=1e2, vmax=1e5))
else: mp = aax.pcolor(x, y, ne, cmap="plasma", norm = matplotlib.colors.LogNorm(vmin=1e2, vmax=1e6))
cbax = addColorbar(mp, ax)
if diff: _ = cbax.set_ylabel(r"$\Delta N_e(t_i,t_{i-1}), cm^{-3}$")
else: _ = cbax.set_ylabel(r"$N_e(t_i), cm^{-3}$")
ax.text(0.01, 1.1, "Rad: bks, Beam: 07\nDate, $t_i$: %s UT"%(ev+dt.timedelta(minutes=time)).strftime("%Y-%m-%d %H:%M"),
horizontalalignment="left", verticalalignment="center",
transform=ax.transAxes)
#ax.text(0.95, 1.05, r"$N_e^{250}=%.2f\times 10^5$ $cm^{-3}$"%np.max(ne[200,:]/1e5), horizontalalignment="right", verticalalignment="center",
# transform=ax.transAxes)
fig = ax.get_figure()
fig.savefig("data/figs/density.ti({ti}).{case}.png".format(ti="%02d"%time, case="f"))
plt.close()
return
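# The Doppler conversion used inline above (0.5 * f * 3e8 / (freq * 1e6)),
# factored out as a sketch for clarity: df_hz is the frequency shift in Hz,
# freq_mhz the operating frequency in MHz, c the speed of light in m/s.
def doppler_velocity(df_hz, freq_mhz, c=3e8):
    """Convert a Doppler frequency shift to line-of-sight velocity (m/s)."""
    return 0.5 * df_hz * c / (freq_mhz * 1e6)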
| 47.133948
| 145
| 0.575437
|
794e9cec4ba21ed196e794ac8d0fe3e86999ed64
| 222
|
py
|
Python
|
eskill_custom/eskill_customisations/doctype/warranty_swap_out/test_warranty_swap_out.py
|
mohsinalimat/eskill_custom
|
1aa4a591c71144d751b78e0a2907353336e71f37
|
[
"MIT"
] | 1
|
2021-07-09T11:49:27.000Z
|
2021-07-09T11:49:27.000Z
|
eskill_custom/eskill_customisations/doctype/warranty_swap_out/test_warranty_swap_out.py
|
mohsinalimat/eskill_custom
|
1aa4a591c71144d751b78e0a2907353336e71f37
|
[
"MIT"
] | 1
|
2021-04-13T13:49:55.000Z
|
2021-04-13T13:49:55.000Z
|
eskill_custom/eskill_customisations/doctype/warranty_swap_out/test_warranty_swap_out.py
|
mohsinalimat/eskill_custom
|
1aa4a591c71144d751b78e0a2907353336e71f37
|
[
"MIT"
] | 4
|
2021-05-05T01:25:49.000Z
|
2022-01-31T21:57:56.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Eskill Trading and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestWarrantySwapOut(unittest.TestCase):
pass
| 20.181818
| 53
| 0.77027
|
794e9da92f90c56a550d311dd11fb5ce313ed5d3
| 1,103
|
py
|
Python
|
backend/users/models.py
|
landdafku11/mobile-backend
|
3f3328afd81f85f90170a57689af72f8f705b8a3
|
[
"MIT"
] | null | null | null |
backend/users/models.py
|
landdafku11/mobile-backend
|
3f3328afd81f85f90170a57689af72f8f705b8a3
|
[
"MIT"
] | null | null | null |
backend/users/models.py
|
landdafku11/mobile-backend
|
3f3328afd81f85f90170a57689af72f8f705b8a3
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
email = models.EmailField(blank=True, max_length=255, unique=True)
apple_id = models.CharField(null=True, max_length=255)
username = models.CharField(null=True, max_length=255)
password = models.CharField(null=True, max_length=128)
email_confirmed = models.BooleanField(blank=False, default=False)
overview = models.TextField(null=True)
location = models.CharField(null=True, max_length=128)
avatar = models.TextField(null=True)
password_reset_token = models.IntegerField(null=True)
password_reset_sent_at = models.DateTimeField(null=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
def __str__(self):
return self.email
class Following(models.Model):
class Meta:
db_table = 'following'
follower = models.ForeignKey(User, on_delete=models.CASCADE, related_name='follower')
followed = models.ForeignKey(User, on_delete=models.CASCADE, related_name='followed')
def __str__(self):
return str(self.pk)
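# Illustrative usage (not part of this module; assumes two saved users):
#
#     alice = User.objects.create(email="alice@example.com")
#     bob = User.objects.create(email="bob@example.com")
#     Following.objects.create(follower=alice, followed=bob)
#     alice.follower.count()  # Following rows where alice is the follower -> 1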
| 32.441176
| 89
| 0.733454
|
794e9dd3c4975fee510bcb1b56e5920bcafd83ce
| 1,549
|
py
|
Python
|
prereq_map/views/api.py
|
uw-it-aca/prereq-map
|
aa8cf09145f16451ce625c6716340fc30cd04570
|
[
"Apache-2.0"
] | null | null | null |
prereq_map/views/api.py
|
uw-it-aca/prereq-map
|
aa8cf09145f16451ce625c6716340fc30cd04570
|
[
"Apache-2.0"
] | 31
|
2019-05-15T23:29:38.000Z
|
2022-02-12T11:43:41.000Z
|
prereq_map/views/api.py
|
uw-it-aca/prereq-map
|
aa8cf09145f16451ce625c6716340fc30cd04570
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import json
from django.views.decorators.cache import cache_control
from django.http import HttpResponse
from django.views import View
from prereq_map.utils.process_data import get_graph
from prereq_map.utils.typeahead import get_curric_typeahead
from prereq_map.utils.typeahead import get_course_typeahead
import logging
logger = logging.getLogger(__name__)
class CurricApiView(View):
@cache_control(max_age=86400)
def get(self, request, curric_code):
response = get_graph(curric_filter=curric_code.upper())
if len(response['x']['nodes']['course_number']) > 0:
return HttpResponse(json.dumps(response))
else:
return error_404()
class CourseApiView(View):
@cache_control(max_age=86400)
def get(self, request, course_code):
response = get_graph(course_filter=course_code.upper())
if response:
return HttpResponse(json.dumps(response))
else:
return error_404()
class CurricTypeaheadApiView(View):
@cache_control(max_age=86400)
def get(self, request):
response = get_curric_typeahead()
return HttpResponse(json.dumps(response))
class CourseTypeaheadApiView(View):
@cache_control(max_age=86400)
def get(self, request):
response = get_course_typeahead()
return HttpResponse(json.dumps(response))
def error_404():
response = HttpResponse()
response.status_code = 404
return response
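# Illustrative URL wiring (not part of this module): the views above are
# standard Django class-based views, so a urls.py entry would look
# roughly like this.
#
#     from django.urls import re_path
#     from prereq_map.views.api import CurricApiView, CourseApiView
#
#     urlpatterns = [
#         re_path(r'^api/v1/curric/(?P<curric_code>[\w &]+)$',
#                 CurricApiView.as_view()),
#         re_path(r'^api/v1/course/(?P<course_code>[\w &]+)$',
#                 CourseApiView.as_view()),
#     ]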
| 28.163636
| 63
| 0.719174
|
794e9e04f736e27b60875ab9a5a27488ab644469
| 330
|
py
|
Python
|
pythonchallenge/level12.py
|
gitduk/web_craw
|
0d3d5c3de91bb1a1c83845464b7279069d5a66f5
|
[
"MIT"
] | null | null | null |
pythonchallenge/level12.py
|
gitduk/web_craw
|
0d3d5c3de91bb1a1c83845464b7279069d5a66f5
|
[
"MIT"
] | null | null | null |
pythonchallenge/level12.py
|
gitduk/web_craw
|
0d3d5c3de91bb1a1c83845464b7279069d5a66f5
|
[
"MIT"
] | 2
|
2021-04-30T06:14:59.000Z
|
2022-01-20T07:16:14.000Z
|
from PIL import Image
im = Image.open('./evil1.jpg')
w, h = im.size
im_tuple = []
for d in im.getdata():
    r, g, b = d
    if r >= 100 or g >= 100 or b >= 100:
        im_tuple.append(d)
image = Image.new(im.mode, (im.size[0]-10, im.size[1]-10))
image.putdata(im_tuple)
image.show()
| 13.2
| 58
| 0.584848
|
794e9f3c5cc0a526ebb87844d148070cc30a4df5
| 10,409
|
py
|
Python
|
tests/gsdata.py
|
QGB/QPSU
|
7bc214676d797f42d2d7189dc67c9377bccdf25d
|
[
"MIT"
] | 6
|
2018-03-25T20:05:21.000Z
|
2022-03-13T17:23:05.000Z
|
tests/gsdata.py
|
pen9un/QPSU
|
76e1a3f6f6f6f78452e02f407870a5a32177b667
|
[
"MIT"
] | 15
|
2018-05-14T03:30:21.000Z
|
2022-03-03T15:33:25.000Z
|
tests/gsdata.py
|
pen9un/QPSU
|
76e1a3f6f6f6f78452e02f407870a5a32177b667
|
[
"MIT"
] | 1
|
2021-07-15T06:23:45.000Z
|
2021-07-15T06:23:45.000Z
|
import hashlib
import base64
import requests
import pickle
import xlwt
import os
import re
import json
import random
class GsDataAPI:
def __init__(self):
self.app_key = 'b523947e120c8ee8a88cb278527ddb5a'
self.app_secret = '1962972fee15606cd1ad1dc8080bb289'
self.sort_map = {'1': 'posttime', '2': 'readnum', '3': 'likenum'}
self.order_map = {'1': 'desc', '2': 'asc'}
self.news_list = []
def _gen_access_token(self, params, router):
params_list = sorted(params.items(), key=lambda x: x[0])
params_str = ''.join([''.join(params) for params in params_list])
params_final = '%s_%s_%s' % (self.app_secret, params_str, self.app_secret)
m = hashlib.md5()
m.update(params_final.encode('utf-8'))
sign = m.hexdigest()
C = base64.b64encode(bytes(self.app_key+':'+sign+':'+router, encoding='utf-8'))
return C
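# Signing sketch (illustrative): for params {'wx_name': 'rmrbwx',
# 'page': '1'} the keys are sorted and concatenated with their values
# ("page1wx_namermrbwx"), wrapped as
# "<app_secret>_page1wx_namermrbwx_<app_secret>", md5-hexdigested to give
# the sign, and the header value is base64("<app_key>:<sign>:<router>").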
def get_msg_info(self, **kwargs):
'''
Param            Type     Nullable  Default   Description                       Example
wx_name          String   YES       ---       WeChat account ID                 rmrbwx
posttime_start   String   YES       ---       article publish start time        2018-08-20 10:00:00
posttime_end     String   YES       ---       article publish end time          2018-09-07 06:00:00 (exclusive)
entertime_start  String   YES       ---       article ingestion start time      2018-08-08 12:00:00
entertime_end    String   YES       ---       article ingestion end time        2018-08-20 22:00:00 (exclusive)
keywords         String   YES       ---       search keywords                   aaa+bbb,ccc,ddd+eee
order            String   YES       desc      sort order                        desc
sort             String   YES       posttime  sort field                        posttime
page             Integer  YES       1         page number                       1
limit            Integer  YES       50        items per page                    20
sn               String   YES       --        sn                                aabbcc
'''
kwargs['limit'] = str(kwargs.get('limit', 50))
if kwargs.get('posttime_start') is not None:
kwargs['posttime_start'] += ' 00:00:00'
if kwargs.get('posttime_end') is not None:
kwargs['posttime_end'] += ' 24:00:00'
sort_type = kwargs.get('sort')
if sort_type in [None, 'posttime']:
router = '/weixin/article/search1'
elif sort_type == 'readnum':
router = '/weixin/article/search2'
elif sort_type == 'likenum':
router = '/weixin/article/search3'
else:
return None
params = kwargs
self.news_list = []
while True:
url = 'http://databus.gsdata.cn:8888/api/service'
C = self._gen_access_token(params, router)
r = requests.get(url, headers={'access-token': C}, params=params)
r_js = r.json()
if not r_js['success']:
print(r_js)
data = r_js['data']
num_found = data['numFound']
pagination = data['pagination']
page = pagination['page']
if page == 1:
print('Total %d articles' % num_found)
self.news_list.extend(data['newsList'])
news_list_len = len(self.news_list)
print('Fetched %d articles' % news_list_len)
if news_list_len >= num_found:
break
params['page'] = str(page + 1)
# with open('test.pkl', 'wb') as f:
# pickle.dump(self.news_list, f)
def save_as_excel(self, filename):
wb = xlwt.Workbook()
ws = wb.add_sheet('Sheet0')
header = ['Title', 'Digest', 'Post Time', 'Author', 'Reads', 'Likes', 'URL']
for i, field in enumerate(header):
ws.write(0, i, field)
col_width = [10000, 10000, 5000, 5000, 5000, 5000, 20000]
col_count = len(col_width)
for i in range(col_count):
ws.col(i).width = col_width[i]
row = 1
for news in self.news_list:
ws.write(row, 0, news['news_title'])
ws.write(row, 1, news['news_digest'])
ws.write(row, 2, news['news_posttime'])
ws.write(row, 3, news['news_author'])
ws.write(row, 4, news['news_read_count'])
ws.write(row, 5, news['news_like_count'])
ws.write(row, 6, news['news_url'])
row += 1
wb.save(filename)
class IDataApi:
def __init__(self):
self.api_key = 'vYpznyAwychvW7ur6HMbUx08YgO81ZX2eFpLytUGRTHeitTSUIONsZLpps3O18aY'
self.data_json = None
def get_msg_info(self, **kwargs):
url = "http://api01.idataapi.cn:8000/post/weixin?apikey=%s" % self.api_key
params = kwargs
headers = {
"Accept-Encoding": "gzip",
"Connection": "close"
}
if not os.path.exists('idata.pkl'):
r = requests.get(url, headers=headers, params=params)
self.data_json = r.json()
if self.data_json['retcode'] == '000000':
with open('idata.pkl', 'wb') as f:
pickle.dump(r.json(), f)
else:
print(self.data_json['message'])
return
else:
with open('idata.pkl', 'rb') as f:
self.data_json = pickle.load(f)
data_list = self.data_json['data']
has_next = self.data_json['hasNext']
page_token = self.data_json['pageToken']
print(has_next)
print(page_token)
print(len(data_list))
for data in data_list:
print(data['title'])
print(data['url'])
print('')
class WechatAPI:
def __init__(self):
self.url = 'https://mp.weixin.qq.com/mp/profile_ext'
self.headers = {
'Host': 'mp.weixin.qq.com',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36 QBCore/3.53.1159.400 QQBrowser/9.0.2524.400 Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 MicroMessenger/6.5.2.501 NetType/WIFI WindowsWechat',
'X-Requested-With': 'XMLHttpRequest',
'Accept': '*/*',
'Referer': 'https://mp.weixin.qq.com/mp/profile_ext?action=home&__biz=MzA5NDc1NzQ4MA==&scene=124&uin=MTMxOTI3Mjc1&key=18296be7e87fa916d06e197d2c416373765f9d9507fb1be1ca58b7278b74ab20427f8abc1b76922d43a42c46fe052bc4e7e6cd1a8e8613615ef660c888a2fb12f463a593d439a46d1a7360fa075108b4&devicetype=Windows+7&version=62060833&lang=zh_CN&a8scene=7&pass_ticket=P12nGbyGYqcxMn8TPtsskVbRJo%2BH9Rojj4I0SNfyL9I%3D&winzoom=1',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.5;q=0.4',
'Cookie': 'rewardsn=; wxtokenkey=777; wxuin=131927275; devicetype=Windows7; version=62060833; lang=zh_CN; pass_ticket=P12nGbyGYqcxMn8TPtsskVbRJo+H9Rojj4I0SNfyL9I=; wap_sid2=COuZ9D4SXGhFWm10djluQ2NCT0d5SHIwMDB1RzBzZ09MNXhnUzhQanBndFB6TDdfTlNzajU1enllMG91cnBvV29FVkxUbXZxVG9janhtcmxZNUNUMTRGRnlCN2dfNERBQUF+MN6i2OoFOA1AlU4=',
}
with open('cookie.txt', 'r') as f:
cookie = f.read()
self.cookies = json.loads(cookie)
def get_token(self):
url = 'https://mp.weixin.qq.com'
response = requests.get(url=url, cookies=self.cookies, verify=False)
token = re.findall(r'token=(\d+)', str(response.url))[0]
print('token:', token)
return token
def get_fakeid(self, mp_id, token):
header = {
"HOST": "mp.weixin.qq.com",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0"
}
query = mp_id
query_id = {
'action': 'search_biz',
'token': token,
'lang': 'zh_CN',
'f': 'json',
'ajax': '1',
'random': random.random(),
'query': query,
'begin': '0',
'count': '5',
}
search_url = 'https://mp.weixin.qq.com/cgi-bin/searchbiz?'
search_response = requests.get(search_url, cookies=self.cookies, headers=header, params=query_id, verify=False)
lists = search_response.json().get('list')[0]
fakeid = lists.get('fakeid')
print(search_response.json())
print('fakeid:', fakeid)
return fakeid
def get_msg(self, fakeid):
params = {
'action': 'getmsg',
'__biz': fakeid,
'f': 'json',
'offset': 10,
'count': 10,
'is_ok': 1,
'scene': 124,
'uin': 'MTMxOTI3Mjc1',
'key': '05eee5e78663c69d88f47c0818a8666d07f12fc80c52ca172928d0d2f7f0bc59ec7fd19cd4b4d4aed825422af5fb0533cefb3abd47cad1705843f61422a0a9ba9e70c3dd8afc9d75ce3d8f50d26b69e7',
'pass_ticket': 'P12nGbyGYqcxMn8TPtsskVbRJo%2BH9Rojj4I0SNfyL9I%3D',
'wxtoken': '',
'appmsg_token': '1022_MIIE0%2BkZ3ICFd%2FeOj_GH9X3jzWdqoH8RvZkHnA~~',
'x5': 0,
'f': 'json'
}
i = 0
while True:
# r = requests.get(self.url, headers=self.headers, params=params, verify=False)
r = requests.get(self.url, params=params, verify=False)
r_json = r.json()
if r_json['errmsg'] == 'ok':
msg_list = json.loads(r_json['general_msg_list'])['list']  # safer than eval on response data
for msg in msg_list:
try:
app_msg_ext_info = msg['app_msg_ext_info']
print(app_msg_ext_info['title'])
print(app_msg_ext_info['link'])
except KeyError:
print(msg)
continue
else:
print(r_json['errmsg'])
print(r_json)
break
if r_json['can_msg_continue'] != 1:
break
params['offset'] = r_json['next_offset']
i += 1
if i == 100:
break
if __name__ == '__main__':
pass
# api = GsDataAPI()
# news_list = api.get_msg_info(wx_name='chaping321', posttime_start='2019-07-15', posttime_end='2019-07-28')
# idata_api = IDataApi()
#idata_api.get_msg_info(uid='chaping321', searchMode='top', beginDate='2018-03-01', endDate='2019-08-14')
wechat_api = WechatAPI()
token = wechat_api.get_token()
fakeid = wechat_api.get_fakeid('chaping321', token)
wechat_api.get_msg(fakeid)
| 40.501946
| 422
| 0.557883
|
794e9f604113903def57e31761ff5d173d130045
| 1,635
|
py
|
Python
|
is_valid/is_match.py
|
nandoflorestan/transvalid
|
4e0adbaad35188312189112cac0c4f187116b4b9
|
[
"MIT"
] | 4
|
2017-10-11T14:04:35.000Z
|
2019-03-29T08:38:09.000Z
|
is_valid/is_match.py
|
nandoflorestan/transvalid
|
4e0adbaad35188312189112cac0c4f187116b4b9
|
[
"MIT"
] | 1
|
2017-11-27T14:43:19.000Z
|
2018-01-14T15:05:38.000Z
|
is_valid/is_match.py
|
Daanvdk/is_valid
|
615c5ae1999095cba398af6ae041a472769857f8
|
[
"MIT"
] | 1
|
2021-06-05T18:06:49.000Z
|
2021-06-05T18:06:49.000Z
|
from .base import Predicate
from .explanation import Explanation
from .is_str import is_str
import re
class is_match(Predicate):
"""
A predicate that checks if the data matches the given pattern. If a string
is provided as a pattern this predicate will compile it first. The
optional parameter ``flags`` allows you to specify flags for this
aforementioned compilation.
"""
prerequisites = [is_str]
def __init__(self, regex, flags=0, rep=None, match_as_data=False):
if isinstance(regex, str):
regex = re.compile(regex, flags)
if rep is None:
rep = '/{}/{}'.format(
regex.pattern,
''.join(
char
for flag, char in [
(re.A, 'a'), (re.I, 'i'), (re.L, 'l'),
(re.M, 'm'), (re.S, 's'), (re.X, 'x'),
]
if regex.flags & flag
),
)
self._regex = regex
self._valid_exp = Explanation(
True, 'match', 'data does match {}'.format(rep)
)
self._not_valid_exp = Explanation(
False, 'not_match', 'data does not match {}'.format(rep)
)
self._match_as_data = match_as_data
def _evaluate(self, data, explain, context):
match = self._regex.search(data)
if match:
res = self._valid_exp if explain else True
if explain and self._match_as_data:
res = res.copy(data=match)
else:
res = self._not_valid_exp if explain else False
return res
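# A minimal usage sketch, calling the internal hook directly; the public
# predicate API presumably wraps _evaluate with the is_str prerequisite:
#
#     pred = is_match(r'^[a-z]+$')
#     pred._evaluate('hello', explain=False, context=None)  # True
#     pred._evaluate('HELLO', explain=True, context=None)   # Explanation(False, ...)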
| 32.7
| 78
| 0.53578
|
794ea0009fd2f41bafd795e14be694e607bb933d
| 4,123
|
py
|
Python
|
includes/carddecks.py
|
torbjornhedqvist/blackjack
|
773faf0d1a4aaeb27ce7e436fc39d34c8ec46e21
|
[
"MIT"
] | 5
|
2018-11-25T21:17:46.000Z
|
2022-02-25T17:18:59.000Z
|
includes/carddecks.py
|
torbjornhedqvist/blackjack
|
773faf0d1a4aaeb27ce7e436fc39d34c8ec46e21
|
[
"MIT"
] | 5
|
2018-11-25T21:39:41.000Z
|
2018-11-26T10:45:57.000Z
|
includes/carddecks.py
|
torbjornhedqvist/blackjack
|
773faf0d1a4aaeb27ce7e436fc39d34c8ec46e21
|
[
"MIT"
] | 2
|
2021-01-09T12:41:45.000Z
|
2021-08-20T16:19:12.000Z
|
#!/usr/bin/env python
"""
Create a playing card deck (normal 52 card deck) or virtual "Shoe" of
decks if more than one is defined.
When one or more decks are created they will be shuffled.
Copyright (C) Torbjorn Hedqvist - All Rights Reserved
You may use, distribute and modify this code under the
terms of the MIT license. See LICENSE file in the project
root for full license information.
"""
from random import shuffle
from playingcard import PlayingCard
class CardDecks(object):
"""
When instantiated holds a list of
:meth:`lib.playingcard.PlayingCard` objects in random order.
"""
def __init__(self, num_of_decks=1):
"""
Create one or more playing card decks and shuffle them all
together in a list.
"""
self.__card_decks = []
for _ in range(num_of_decks):
for suit in range(0, 4):
for rank in range(1, 14):
instance = PlayingCard(rank, suit)
self.__card_decks.append(instance)
self.shuffle()
def shuffle(self):
"""
Shuffle all the cards in this instance list.
:return: None
"""
shuffle(self.__card_decks)
def pop(self):
"""
Pop (pull and remove) the last card in the list.
:return: A :meth:`lib.playingcard.PlayingCard` object.
"""
return self.__card_decks.pop()
def length(self):
"""
:return: The length (the number of remaining cards) in the list.
"""
return len(self.__card_decks)
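# Example usage (illustrative): a six-deck shoe as used at many
# Black Jack tables; 6 decks x 52 cards = 312.
#
#     shoe = CardDecks(num_of_decks=6)
#     shoe.length()  # 312
#     card = shoe.pop()  # a PlayingCard instance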
class TestingCardDeck(object):
"""
Used to create a pre-defined deck for testing purposes
"""
def __init__(self):
"""
Instantiate an instance of a deck containing
:meth:`lib.playingcard.PlayingCard` objects
which have pre-defined values to test specific
scenarios in the Black Jack game
"""
self.__card_decks = []
for x in range(1, 52): # Fill up a deck of dummies
instance = PlayingCard(7, 1)
self.__card_decks.append(instance)
# Stay on 19 (ace + 8) and dealer gets two aces 1+1+4+(common value in deck above)
self.__card_decks.append(PlayingCard(4, 1))
self.__card_decks.append(PlayingCard(1, 0))
self.__card_decks.append(PlayingCard(8, 3))
self.__card_decks.append(PlayingCard(1, 3))
self.__card_decks.append(PlayingCard(1, 2))
# Two tens to player to be used for split, followed by two aces to see how a
# double black jack is handled.
self.__card_decks.append(PlayingCard(6, 2))
self.__card_decks.append(PlayingCard(8, 1))
self.__card_decks.append(PlayingCard(4, 1))
self.__card_decks.append(PlayingCard(10, 0))
# First hand for player is a BlackJack
self.__card_decks.append(PlayingCard(6, 2))
self.__card_decks.append(PlayingCard(10, 1))
self.__card_decks.append(PlayingCard(4, 1))
self.__card_decks.append(PlayingCard(1, 0))
# Start with a low hand for player to test double down
self.__card_decks.append(PlayingCard(6, 2))
self.__card_decks.append(PlayingCard(2, 1))
self.__card_decks.append(PlayingCard(4, 1))
self.__card_decks.append(PlayingCard(2, 0))
# Create a split, first hand ok and second busted
self.__card_decks.append(PlayingCard(12, 1))
self.__card_decks.append(PlayingCard(4, 1))
self.__card_decks.append(PlayingCard(2, 0))
self.__card_decks.append(PlayingCard(6, 2))
self.__card_decks.append(PlayingCard(8, 1))
self.__card_decks.append(PlayingCard(4, 1))
self.__card_decks.append(PlayingCard(8, 0))
def pop(self):
"""
Pop (pull and remove) the last card in the list.
:return: A :meth:`lib.playingcard.PlayingCard` object.
"""
return self.__card_decks.pop()
def length(self):
"""
:return: The length (the number of remaining cards) in the list.
"""
return len(self.__card_decks)
| 30.768657
| 90
| 0.628426
|
794ea10744e38a5d14d96e353f80b49d78072a64
| 748
|
py
|
Python
|
pango-1.42.4/tests/gen-installed-test.py
|
CSRedRat/scratchjr-linux-ubuntu
|
f11dc037d889e97ca26778dc3133d94f22dbcf38
|
[
"BSD-3-Clause"
] | 1
|
2021-12-20T17:27:24.000Z
|
2021-12-20T17:27:24.000Z
|
pango-1.42.4/tests/gen-installed-test.py
|
CSRedRat/scratchjr-linux-ubuntu
|
f11dc037d889e97ca26778dc3133d94f22dbcf38
|
[
"BSD-3-Clause"
] | 3
|
2021-12-20T17:36:50.000Z
|
2022-03-06T08:54:25.000Z
|
pango-1.42.4/tests/gen-installed-test.py
|
CSRedRat/scratchjr-linux-ubuntu
|
f11dc037d889e97ca26778dc3133d94f22dbcf38
|
[
"BSD-3-Clause"
] | null | null | null |
import argparse
import os
template = '''[Test]
Type=session
Exec={}
'''
def build_template(test_dir, test_name):
return template.format(os.path.join(test_dir, test_name))
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Generate installed-test description file')
argparser.add_argument('installed_test_dir', help='Path for installed test binaries')
argparser.add_argument('test_name', help='Name of the test unit')
argparser.add_argument('out_dir', help='Path for the output')
args = argparser.parse_args()
outfile = os.path.join(args.out_dir, args.test_name + '.test')
with open(outfile, 'w') as f:
f.write(build_template(args.installed_test_dir, args.test_name))
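# Example invocation (illustrative paths):
#
#     python gen-installed-test.py /usr/libexec/installed-tests/pango \
#         test-layout /tmp
#
# writes /tmp/test-layout.test containing:
#
#     [Test]
#     Type=session
#     Exec=/usr/libexec/installed-tests/pango/test-layout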
| 31.166667
| 95
| 0.731283
|
794ea183db4fa388665805b018f4558b69fd4795
| 8,268
|
py
|
Python
|
pybrain/tests/optimizationtest.py
|
sveilleux1/pybrain
|
1e1de73142c290edb84e29ca7850835f3e7bca8b
|
[
"BSD-3-Clause"
] | 2,208
|
2015-01-02T02:14:41.000Z
|
2022-03-31T04:45:46.000Z
|
pybrain/tests/optimizationtest.py
|
sveilleux1/pybrain
|
1e1de73142c290edb84e29ca7850835f3e7bca8b
|
[
"BSD-3-Clause"
] | 91
|
2015-01-08T16:42:16.000Z
|
2021-12-11T19:16:35.000Z
|
pybrain/tests/optimizationtest.py
|
sveilleux1/pybrain
|
1e1de73142c290edb84e29ca7850835f3e7bca8b
|
[
"BSD-3-Clause"
] | 786
|
2015-01-02T15:18:20.000Z
|
2022-02-23T23:42:40.000Z
|
#! /usr/bin/env python
from __future__ import print_function
""" This test script will test the set of optimization algorithms.
It tests
- the conformity of interface
- the behavior on simple functions
- the behavior on FitnessEvaluators
- the behavior when optimizing a list or an array
- the behavior when optimizing an Evolvable
- the behavior when optimizing a ParameterContainer
- consistency w.r.t. minimization/maximization
Tests to be added:
- tolerance of problems that have a constant fitness
- tolerance of problems that have adversarial (strictly decreasing) fitness
- handling one-dimensional and high-dimensional spaces
- reasonable results on the linear function
"""
__author__ = 'Tom Schaul, tom@idsia.ch'
from inspect import isclass
from numpy import sum, array, ndarray, log10
from random import random, choice
import pybrain.optimization.optimizer as bbo
import pybrain.optimization.populationbased.multiobjective as mobj
import pybrain.optimization as allopts
from pybrain.rl.environments.functions.unimodal import SphereFunction
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.evolvables.evolvable import Evolvable
from pybrain.rl.environments.cartpole.balancetask import BalanceTask
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules.module import Module
# Tasks to be optimized:
# ----------------------
# simple function
sf = lambda x:-sum((x + 1) ** 2)
# FunctionEnvironment class
fe = SphereFunction
# initialized FE
ife1 = fe(1)
ife2 = fe(2)
ife100 = fe(100)
# a Task object
task = BalanceTask()
task.N = 10
# for the simple evolvable class defined below
evoEval = lambda e: e.x
# starting points
# ----------------------
xlist1 = [2.]
xlist2 = [0.2, 10]
xlist100 = list(range(12, 112))
xa1 = array(xlist1)
xa2 = array(xlist2)
xa100 = array(xlist100)
pc1 = ParameterContainer(1)
pc2 = ParameterContainer(2)
pc100 = ParameterContainer(100)
pc1._setParameters(xa1)
pc2._setParameters(xa2)
pc100._setParameters(xa100)
# for the task object, we need a module
nnet = buildNetwork(task.outdim, 2, task.indim)
# a mimimalistic Evolvable subclass that is not (like usual) a ParameterContainer
class SimpleEvo(Evolvable):
def __init__(self, x): self.x = x
def mutate(self): self.x += random() - 0.3
def copy(self): return SimpleEvo(self.x)
def randomize(self): self.x = 10 * random() - 2
def __repr__(self): return '--%.3f--' % self.x
evo1 = SimpleEvo(-3.)
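# Illustrative: any non-topology optimizer that accepts arbitrary
# Evolvables can drive SimpleEvo directly (mirroring testOnEvolvable
# below), e.g.:
#
#     HillClimber(evoEval, evo1).learn(100)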
# the test functions
# ----------------------
def testInterface(algo):
""" Tests whether the algorithm is properly implementing the
correct Blackbox-optimization interface."""
# without any arguments, initialization has to work
emptyalgo = algo()
try:
# but not learning
emptyalgo.learn(0)
return "Failed to throw missing evaluator error?"
except AssertionError:
pass
emptyalgo.setEvaluator(sf, xa1)
# now it can run
emptyalgo.learn(0)
# simple functions don't check for dimension mismatch
algo(sf, xa1)
algo(sf, xa100)
# for these, either an initial point or a dimension parameter is required
algo(sf, numParameters=2)
try:
algo(sf)
return "Failed to throw unknown dimension error"
except ValueError:
pass
# FitnessEvaluators do not require that
algo(ife1)
# parameter containers can be used too
algo(ife2, pc2)
return True
def testContinuousInterface(algo):
""" Test the specifics for the interface for ContinuousOptimizers """
if not issubclass(algo, bbo.ContinuousOptimizer):
return True
# list starting points are internally converted to arrays
x = algo(sf, xlist2)
assert isinstance(x.bestEvaluable, ndarray), 'not converted to array'
# check for dimension mismatch
try:
algo(ife1, xa2)
return "Failed to throw dimension mismatch error"
except ValueError:
pass
return True
def testMinMax(algo):
""" Verify that the algorithm is doing the minimization/maximization consistently. """
if (issubclass(algo, bbo.TopologyOptimizer)
or algo == allopts.StochasticHillClimber):
# TODO
return True
xa1[0] = 2
evalx = sf(xa1)
amax1 = algo(sf, xa1, minimize=False)
amax2 = algo(sf, xa1)
amax2.minimize = False
amax3 = algo()
amax3.setEvaluator(sf, xa1)
amax3.minimize = False
amax4 = algo()
amax4.minimize = False
amax4.setEvaluator(sf, xa1)
for i, amax in enumerate([amax1, amax2, amax3, amax4]):
assert amax.minimize is False or amax.mustMinimize, 'Max: Attribute not set correctly.' \
+ str(amax.minimize) + str(amax.mustMinimize) + str(i)
x, xv = amax.learn(1)
assert sf(x) == xv, 'Evaluation does not fit: ' + str((sf(x), xv))
assert xv >= evalx, 'Evaluation did not increase: ' + str(xv) + ' (init: ' + str(evalx) + ')'
xa1[0] = 2
amin1 = algo(sf, xa1, minimize=True)
amin2 = algo(sf, xa1)
amin2.minimize = True
amin3 = algo()
amin3.setEvaluator(sf, xa1)
amin3.minimize = True
amin4 = algo()
amin4.minimize = True
amin4.setEvaluator(sf, xa1)
for i, amin in enumerate([amin1, amin2, amin3, amin4]):
assert amin.minimize is True or amin.mustMaximize, 'Min: Attribute not set correctly.' \
+ str(amin.minimize) + str(amin.mustMaximize) + str(i)
x, xv = amin.learn(1)
assert sf(x) == xv, 'Evaluation does not fit: ' + str((sf(x), xv)) + str(i)
assert xv <= evalx, 'Evaluation did not decrease: ' + str(xv) + ' (init: ' + str(evalx) + ')' + str(i)
assert ((amin.minimize is not amax.minimize)
or not (amin._wasOpposed is amax._wasOpposed)), 'Inconsistent flags.'
return True
def testOnModuleAndTask(algo):
l = algo(task, nnet)
assert isinstance(l._bestFound()[0], Module), 'Did not return a module.'
return True
def testOnEvolvable(algo):
if issubclass(algo, bbo.ContinuousOptimizer):
return True
if issubclass(algo, bbo.TopologyOptimizer):
try:
algo(evoEval, evo1).learn(1)
return "Topology optimizers should not accept arbitrary Evolvables"
except AttributeError:
return True
else:
algo(evoEval, evo1).learn(1)
return True
# the main test procedure
# ------------------------
def testAll(tests, allalgos, tolerant=True):
countgood = 0
for i, algo in enumerate(sorted(allalgos)):
print(("%d, %s:" % (i + 1, algo.__name__)))
print((' ' * int(log10(i + 1) + 2),))
good = True
messages = []
for t in tests:
try:
res = t(algo)
except Exception as e:
if not tolerant:
raise e
res = e
if res is True:
print('.', end='')
else:
good = False
messages.append(res)
print('F', end='')
if good:
countgood += 1
print('--- OK.')
else:
print('--- NOT OK.')
for m in messages:
if m is not None:
print(' ' * int(log10(i + 1) + 2), '->', m)
print()
print('Summary:', countgood, '/', len(allalgos), 'of the tests were passed.')
if __name__ == '__main__':
from pybrain.optimization import * #@UnusedWildImport
#from pybrain.optimization import CMAES #@UnusedImport
allalgos = [c for c in list(globals().values()) if (isclass(c)
and issubclass(c, bbo.BlackBoxOptimizer)
and not issubclass(c, mobj.MultiObjectiveGA)
)]
print('Optimization algorithms to be tested:', len(allalgos))
print()
print('Note: this collection of tests may take quite some time.')
print()
tests = [testInterface,
testContinuousInterface,
testOnModuleAndTask,
testOnEvolvable,
testMinMax,
]
testAll(tests, allalgos, tolerant=True)
| 29.741007
| 110
| 0.626391
|
794ea1cf784734a0937d715f5b0f44271b7f6bd8
| 45
|
py
|
Python
|
hello_world.py
|
fanraul/python-hello-world
|
a208c62235455b5a0c978fc34c65888ed9277850
|
[
"MIT"
] | null | null | null |
hello_world.py
|
fanraul/python-hello-world
|
a208c62235455b5a0c978fc34c65888ed9277850
|
[
"MIT"
] | null | null | null |
hello_world.py
|
fanraul/python-hello-world
|
a208c62235455b5a0c978fc34c65888ed9277850
|
[
"MIT"
] | null | null | null |
print('hello world!')
print('hello github')
| 22.5
| 22
| 0.688889
|