| text (stringlengths 4–1.02M) | meta (dict) |
|---|---|
import os
import tempfile
import mock
from handroll.configuration import Configuration
from handroll.director import Director
from handroll.resolver import FileResolver
from handroll.site import Site
from handroll.tests import TestCase
class TestDirector(TestCase):
def test_generates_with_user_specified_outdir(self):
config = Configuration()
config.outdir = tempfile.mkdtemp()
site = self.factory.make_site()
marker = 'marker.txt'
open(os.path.join(site.path, marker), 'w').close()
director = Director(config, site, [])
director.produce()
out_marker = os.path.join(config.outdir, marker)
self.assertTrue(os.path.exists(out_marker))
def test_skips_file_with_skip_extension(self):
config = Configuration()
site = self.factory.make_site()
skip = 'to_my_lou.swp'
open(os.path.join(site.path, skip), 'w').close()
director = Director(config, site, [])
director.produce()
out_skip = os.path.join(site.output_root, skip)
self.assertFalse(os.path.exists(out_skip))
def test_skips_file_in_skip_list(self):
config = Configuration()
site = self.factory.make_site()
skip = Site.CONFIG
open(os.path.join(site.path, skip), 'w').close()
director = Director(config, site, [])
director.produce()
out_skip = os.path.join(site.output_root, skip)
self.assertFalse(os.path.exists(out_skip))
def test_skips_templates_directory(self):
config = Configuration()
site = self.factory.make_site()
templates = os.path.join(site.path, 'templates')
os.mkdir(templates)
director = Director(config, site, [])
director.produce()
out_templates = os.path.join(site.output_root, 'templates')
self.assertFalse(os.path.exists(out_templates))
def test_does_timing(self):
mock_time = mock.Mock()
mock_time.return_value = 42.0 # Return float so that format works.
site = self.factory.make_site()
open(os.path.join(site.path, 'fake.md'), 'w').close()
config = Configuration()
config.timing = True
director = Director(config, site, [])
with mock.patch('handroll.director.time.time', mock_time):
director.produce()
self.assertTrue(mock_time.called)
def test_generates_output_directory(self):
config = Configuration()
site = self.factory.make_site()
another = os.path.join(site.path, 'another')
os.mkdir(another)
director = Director(config, site, [])
director.produce()
another_out = os.path.join(site.output_root, 'another')
self.assertTrue(os.path.isdir(another_out))
def test_process_file_ignores_files_already_in_output(self):
# This condition is checked because the output directory can be within
# the source (e.g., the default of storing results in 'output'). If
# the watcher is watching for any changes in site source, then
# processing files in the output directory could lead the watcher into
# an infinite loop.
config = Configuration()
site = self.factory.make_site()
config.outdir = os.path.join(site.path, 'outdir')
os.mkdir(config.outdir)
marker = os.path.join(config.outdir, 'marker.md')
open(marker, 'w').close()
director = Director(config, site, [])
director.process_file(marker)
marker_output = os.path.join(config.outdir, 'marker.html')
self.assertFalse(os.path.exists(marker_output))
def test_process_directory_ignores_directories_already_in_output(self):
# Avoid processing directories in output for the same reason that
# file processing is skipped.
config = Configuration()
site = self.factory.make_site()
config.outdir = os.path.join(site.path, 'outdir')
os.mkdir(config.outdir)
directory = os.path.join(config.outdir, 'directory')
os.mkdir(directory)
director = Director(config, site, [])
director.process_directory(directory)
directory_output = os.path.join(config.outdir, 'outdir', 'directory')
self.assertFalse(os.path.exists(directory_output))
def test_file_in_source_and_outdir_is_ignored(self):
"""A source file is ignored when the source dir is in the outdir."""
config = Configuration()
config.outdir = tempfile.mkdtemp()
site_path = os.path.join(config.outdir, 'site')
site = Site(site_path)
director = Director(config, site, [])
fake_file = os.path.join(site_path, 'fake')
is_in_output = director.is_in_output(fake_file)
self.assertFalse(is_in_output)
@mock.patch('handroll.director.signals')
def test_produce_triggers_post_composition(self, signals):
config = Configuration()
site = self.factory.make_site()
director = Director(config, site, [])
director.produce()
signals.post_composition.send.assert_called_once_with(director)
@mock.patch('handroll.director.signals')
def test_process_file_triggers_post_composition(self, signals):
config = Configuration()
site = self.factory.make_site()
director = Director(config, site, [])
marker = os.path.join(site.path, 'marker.txt')
open(marker, 'w').close()
director.process_file(marker)
signals.post_composition.send.assert_called_once_with(director)
@mock.patch('handroll.director.signals')
def test_process_directory_triggers_post_composition(self, signals):
config = Configuration()
site = self.factory.make_site()
director = Director(config, site, [])
config.outdir = os.path.join(site.path, 'outdir')
os.mkdir(config.outdir)
directory = os.path.join(site.path, 'directory')
os.mkdir(directory)
director.process_directory(directory)
signals.post_composition.send.assert_called_once_with(director)
@mock.patch('handroll.director.signals')
def test_produce_triggers_pre_composition(self, signals):
config = Configuration()
site = self.factory.make_site()
director = Director(config, site, [])
director.produce()
signals.pre_composition.send.assert_called_once_with(director)
@mock.patch('handroll.director.signals')
def test_process_file_triggers_pre_composition(self, signals):
config = Configuration()
site = self.factory.make_site()
director = Director(config, site, [])
marker = os.path.join(site.path, 'marker.txt')
open(marker, 'w').close()
director.process_file(marker)
signals.pre_composition.send.assert_called_once_with(director)
@mock.patch('handroll.director.signals')
def test_process_directory_triggers_pre_composition(self, signals):
config = Configuration()
site = self.factory.make_site()
director = Director(config, site, [])
config.outdir = os.path.join(site.path, 'outdir')
os.mkdir(config.outdir)
directory = os.path.join(site.path, 'directory')
os.mkdir(directory)
director.process_directory(directory)
signals.pre_composition.send.assert_called_once_with(director)
def test_process_file_ignores_templates(self):
config = Configuration()
site = self.factory.make_site()
default = os.path.join(site.path, 'template.html')
open(default, 'w').close()
config.outdir = os.path.join(site.path, 'outdir')
os.mkdir(config.outdir)
director = Director(config, site, [])
director.process_file(default)
default_output = os.path.join(config.outdir, 'template.html')
self.assertFalse(os.path.exists(default_output))
def test_process_directory_ignores_templates(self):
config = Configuration()
site = self.factory.make_site()
config.outdir = os.path.join(site.path, 'outdir')
os.mkdir(config.outdir)
director = Director(config, site, [])
directory = os.path.join(director.catalog.templates_path, 'test')
os.makedirs(directory)
director.process_directory(directory)
directory_output = os.path.join(
config.outdir, director.catalog.TEMPLATES_DIR, 'test')
self.assertFalse(os.path.exists(directory_output))
def test_has_resolver(self):
director = self.factory.make_director()
self.assertTrue(isinstance(director.resolver, FileResolver))
@mock.patch('handroll.director.signals')
def test_process_file_ignores_skip_files(self, signals):
director = self.factory.make_director()
director.process_file('fake.swp')
self.assertFalse(signals.pre_composition.called)
|
{
"content_hash": "8d6f9b20bc0b01b32c648db906e59c35",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 78,
"avg_line_length": 36.08097165991903,
"alnum_prop": 0.6435143626570916,
"repo_name": "handroll/handroll",
"id": "431e24cbcbcdf2d29995302eb6aa2256f4a4b08a",
"size": "8947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handroll/tests/test_director.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "342"
},
{
"name": "Python",
"bytes": "170950"
}
],
"symlink_target": ""
}
|
"""Wrapper for creating the swimmer environment."""
import math
import numpy as np
import mujoco_py
import os
from gym import utils
from gym.envs.mujoco import mujoco_env
from d4rl.locomotion import mujoco_goal_env
from d4rl.locomotion import goal_reaching_env
from d4rl.locomotion import maze_env
from d4rl import offline_env
GYM_ASSETS_DIR = os.path.join(
os.path.dirname(mujoco_env.__file__),
'assets')
class SwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
"""Basic swimmer locomotion environment."""
FILE = os.path.join(GYM_ASSETS_DIR, 'swimmer.xml')
def __init__(self, file_path=None, expose_all_qpos=False, non_zero_reset=False):
if file_path is None:
file_path = self.FILE
self._expose_all_qpos = expose_all_qpos
mujoco_env.MujocoEnv.__init__(self, file_path, 5)
utils.EzPickle.__init__(self)
@property
def physics(self):
# Check mujoco version is greater than version 1.50 to call correct physics
# model containing PyMjData object for getting and setting position/velocity.
# Check https://github.com/openai/mujoco-py/issues/80 for updates to api.
if mujoco_py.get_version() >= '1.50':
return self.sim
else:
return self.model
def _step(self, a):
return self.step(a)
def step(self, a):
ctrl_cost_coeff = 0.0001
xposbefore = self.sim.data.qpos[0]
self.do_simulation(a, self.frame_skip)
xposafter = self.sim.data.qpos[0]
reward_fwd = (xposafter - xposbefore) / self.dt
reward_ctrl = - ctrl_cost_coeff * np.square(a).sum()
reward = reward_fwd + reward_ctrl
ob = self._get_obs()
return ob, reward, False, dict(reward_fwd=reward_fwd, reward_ctrl=reward_ctrl)
def _get_obs(self):
if self._expose_all_qpos:
obs = np.concatenate([
self.physics.data.qpos.flat[:5], # Ensures only swimmer obs.
self.physics.data.qvel.flat[:5],
])
else:
obs = np.concatenate([
self.physics.data.qpos.flat[2:5],
self.physics.data.qvel.flat[:5],
])
return obs
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(
size=self.model.nq, low=-.1, high=.1)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
# Set everything other than swimmer to original position and 0 velocity.
qpos[5:] = self.init_qpos[5:]
qvel[5:] = 0.
self.set_state(qpos, qvel)
return self._get_obs()
def get_xy(self):
return self.physics.data.qpos[:2]
def set_xy(self, xy):
qpos = np.copy(self.physics.data.qpos)
qpos[0] = xy[0]
qpos[1] = xy[1]
qvel = self.physics.data.qvel
self.set_state(qpos, qvel)
class GoalReachingSwimmerEnv(goal_reaching_env.GoalReachingEnv, SwimmerEnv):
"""Swimmer locomotion rewarded for goal-reaching."""
BASE_ENV = SwimmerEnv
def __init__(self, goal_sampler=goal_reaching_env.disk_goal_sampler,
file_path=None,
expose_all_qpos=False, non_zero_reset=False, eval=False, reward_type="dense", **kwargs):
goal_reaching_env.GoalReachingEnv.__init__(self, goal_sampler, eval=eval, reward_type=reward_type)
SwimmerEnv.__init__(self,
file_path=file_path,
expose_all_qpos=expose_all_qpos,
non_zero_reset=non_zero_reset)
class SwimmerMazeEnv(maze_env.MazeEnv, GoalReachingSwimmerEnv, offline_env.OfflineEnv):
"""Swimmer navigating a maze."""
LOCOMOTION_ENV = GoalReachingSwimmerEnv
def __init__(self, goal_sampler=None, expose_all_qpos=True,
reward_type='dense',
*args, **kwargs):
if goal_sampler is None:
goal_sampler = lambda np_rand: maze_env.MazeEnv.goal_sampler(self, np_rand)
maze_env.MazeEnv.__init__(
self, *args, manual_collision=False,
goal_sampler=goal_sampler,
expose_all_qpos=expose_all_qpos,
reward_type=reward_type,
**kwargs)
offline_env.OfflineEnv.__init__(self, **kwargs)
def set_target(self, target_location=None):
return self.set_target_goal(target_location)
|
{
"content_hash": "c04e33a85292dac8181ed40112f53275",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 103,
"avg_line_length": 32.664,
"alnum_prop": 0.6531961792799412,
"repo_name": "rail-berkeley/d4rl",
"id": "bb8282a7d18d5dde1a993b6241a6f40735c7f6c9",
"size": "4083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "d4rl/locomotion/swimmer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "573379"
},
{
"name": "Shell",
"bytes": "627"
}
],
"symlink_target": ""
}
|
"""Hyperparameter values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import re
import six
from tensorflow.contrib.training.python.training import hparam_pb2
from tensorflow.python.framework import ops
from tensorflow.python.util import compat
def parse_values(values, type_map):
"""Parses hyperparameter values from a string into a python map..
`values` is a string containing comma-separated `name=value` pairs.
For each pair, the value of the hyperparameter named `name` is set to
`value`.
If a hyperparameter name appears multiple times in `values`, the last
value is used.
The `value` in `name=value` must follow the syntax required by the
type of the parameter:
* Scalar integer: A Python-parsable integer value. E.g.: 1,
100, -12.
* Scalar float: A Python-parsable floating point value. E.g.: 1.0,
-.54e89.
* Boolean: Either true or false.
* Scalar string: A non-empty sequence of characters, excluding commas,
spaces, and square brackets. E.g.: foo, bar_1.
* List: A comma separated list of scalar values of the parameter type
enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low].
Args:
values: String. Comma separated list of `name=value` pairs where
'value' must follow the syntax described above.
type_map: A dictionary mapping hyperparameter names to types. Note every
parameter name in values must be a key in type_map. The values must
conform to the types indicated, where a value V is said to conform to a
type T if either V has type T, or V is a list of elements of type T.
Hence, for a multidimensional parameter 'x' taking float values,
'x=[0.1,0.2]' will parse successfully if type_map['x'] = float.
Returns:
A python map containing the name, value pairs.
Raises:
ValueError: If `values` cannot be parsed.
"""
ret = {}
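  # The pattern consumes one pair at a time: 'name' is an identifier,
  # 'val' captures a scalar (anything up to a comma or '['), and 'vals'
  # captures the comma-separated body of a bracketed list.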
param_re = re.compile(
r'(?P<name>[a-zA-Z][\w]*)\s*=\s*'
r'((?P<val>[^,\[]*)|\[(?P<vals>[^\]]*)\])($|,)')
pos = 0
while pos < len(values):
m = param_re.match(values, pos)
if not m:
raise ValueError('Malformed hyperparameter value: %s' % values[pos:])
# Check that there is a comma between parameters and move past it.
pos = m.end()
# Parse the values.
m_dict = m.groupdict()
name = m_dict['name']
if name not in type_map:
raise ValueError('Unknown hyperparameter type for %s' % name)
type_ = type_map[name]
def parse_fail(value):
raise ValueError(
'Could not parse hparam \'%s\' of type \'%s\' with value \'%s\' in %s'
% (name, type_.__name__, value, values))
if type_ == bool:
def parse_bool(value):
if value == 'true':
return True
elif value == 'false':
return False
else:
try:
return bool(int(value))
except (ValueError, TypeError):
parse_fail(value)
parse = parse_bool
else:
parse = type_
if m_dict['val'] is not None:
try:
ret[name] = parse(m_dict['val'])
except (ValueError, TypeError):
parse_fail(m_dict['val'])
elif m_dict['vals'] is not None:
elements = filter(None, re.split('[ ,]', m_dict['vals']))
try:
ret[name] = [parse(e) for e in elements]
except (ValueError, TypeError):
parse_fail(m_dict['vals'])
else:
parse_fail('')
return ret
class HParams(object):
"""Class to hold a set of hyperparameters as name-value pairs.
A `HParams` object holds hyperparameters used to build and train a model,
such as the number of hidden units in a neural net layer or the learning rate
to use when training.
You first create a `HParams` object by specifying the names and values of the
hyperparameters.
To make them easily accessible the parameter names are added as direct
attributes of the class. A typical usage is as follows:
```python
# Create a HParams object specifying names and values of the model
# hyperparameters:
hparams = HParams(learning_rate=0.1, num_hidden_units=100)
# The hyperparameters are available as attributes of the HParams object:
hparams.learning_rate ==> 0.1
hparams.num_hidden_units ==> 100
```
Hyperparameters have a type, which is inferred from the type of the value
passed at construction time. The currently supported types are: integer,
float, string, and list of integer, float, or string.
You can override hyperparameter values by calling the
[`parse()`](#HParams.parse) method, passing a string of comma separated
`name=value` pairs. This is intended to make it possible to override
any hyperparameter values from a single command-line flag to which
the user passes 'hyper-param=value' pairs. It avoids having to define
one flag for each hyperparameter.
The syntax expected for each value depends on the type of the parameter.
See `parse()` for a description of the syntax.
Example:
```python
# Define a command line flag to pass name=value pairs.
# For example using argparse:
import argparse
parser = argparse.ArgumentParser(description='Train my model.')
parser.add_argument('--hparams', type=str,
help='Comma separated list of "name=value" pairs.')
args = parser.parse_args()
...
def my_program():
# Create a HParams object specifying the names and values of the
# model hyperparameters:
hparams = tf.HParams(learning_rate=0.1, num_hidden_units=100,
activations=['relu', 'tanh'])
# Override hyperparameters values by parsing the command line
hparams.parse(args.hparams)
# If the user passed `--hparams=learning_rate=0.3` on the command line
# then 'hparams' has the following attributes:
hparams.learning_rate ==> 0.3
hparams.num_hidden_units ==> 100
hparams.activations ==> ['relu', 'tanh']
# If the hyperparameters are in json format use parse_json:
hparams.parse_json('{"learning_rate": 0.3, "activations": "relu"}')
```
"""
def __init__(self, hparam_def=None, model_structure=None, **kwargs):
"""Create an instance of `HParams` from keyword arguments.
The keyword arguments specify name-values pairs for the hyperparameters.
The parameter types are inferred from the type of the values passed.
The parameter names are added as attributes of `HParams` object, so they
can be accessed directly with the dot notation `hparams.<name>`.
Example:
```python
# Define 3 hyperparameters: 'learning_rate' is a float parameter,
# 'num_hidden_units' an integer parameter, and 'activation' a string
# parameter.
hparams = tf.HParams(
learning_rate=0.1, num_hidden_units=100, activation='relu')
hparams.activation ==> 'relu'
```
Note that a few names are reserved and cannot be used as hyperparameter
names. If you use one of the reserved names, the constructor raises a
`ValueError`.
Args:
hparam_def: Serialized hyperparameters, encoded as a hparam_pb2.HParamDef
protocol buffer. If provided, this object is initialized by
deserializing hparam_def. Otherwise **kwargs is used.
model_structure: An instance of ModelStructure, defining the feature
crosses to be used in the Trial.
**kwargs: Key-value pairs where the key is the hyperparameter name and
the value is the value for the parameter.
Raises:
ValueError: If both `hparam_def` and initialization values are provided,
or if one of the arguments is invalid.
"""
# Register the hyperparameters and their type in _hparam_types.
# This simplifies the implementation of parse().
# _hparam_types maps the parameter name to a tuple (type, bool).
# The type value is the type of the parameter for scalar hyperparameters,
# or the type of the list elements for multidimensional hyperparameters.
# The bool value is True if the value is a list, False otherwise.
self._hparam_types = {}
self._model_structure = model_structure
if hparam_def:
self._init_from_proto(hparam_def)
if kwargs:
raise ValueError('hparam_def and initialization values are '
'mutually exclusive')
else:
for name, value in six.iteritems(kwargs):
self.add_hparam(name, value)
def _init_from_proto(self, hparam_def):
"""Creates a new HParams from `HParamDef` protocol buffer.
Args:
hparam_def: `HParamDef` protocol buffer.
"""
assert isinstance(hparam_def, hparam_pb2.HParamDef)
for name, value in hparam_def.hparam.items():
kind = value.WhichOneof('kind')
if kind.endswith('_value'):
# Single value.
if kind.startswith('int64'):
# Setting attribute value to be 'int' to ensure the type is compatible
# with both Python2 and Python3.
self.add_hparam(name, int(getattr(value, kind)))
elif kind.startswith('bytes'):
# Setting attribute value to be 'str' to ensure the type is compatible
# with both Python2 and Python3. UTF-8 encoding is assumed.
self.add_hparam(name, compat.as_str(getattr(value, kind)))
else:
self.add_hparam(name, getattr(value, kind))
else:
# List of values.
if kind.startswith('int64'):
# Setting attribute value to be 'int' to ensure the type is compatible
# with both Python2 and Python3.
self.add_hparam(name, [int(v) for v in getattr(value, kind).value])
elif kind.startswith('bytes'):
# Setting attribute value to be 'str' to ensure the type is compatible
# with both Python2 and Python3. UTF-8 encoding is assumed.
self.add_hparam(name, [compat.as_str(v)
for v in getattr(value, kind).value])
else:
self.add_hparam(name, [v for v in getattr(value, kind).value])
def add_hparam(self, name, value):
"""Adds {name, value} pair to hyperparameters.
Args:
name: Name of the hyperparameter.
value: Value of the hyperparameter. Can be one of the following types:
int, float, string, int list, float list, or string list.
Raises:
ValueError: if one of the arguments is invalid.
"""
# Keys in kwargs are unique, but 'name' could be the name of a pre-existing
# attribute of this object. In that case we refuse to use it as a
# hyperparameter name.
if getattr(self, name, None) is not None:
raise ValueError('Hyperparameter name is reserved: %s' % name)
if isinstance(value, (list, tuple)):
if not value:
raise ValueError('Multi-valued hyperparameters cannot be empty: %s'
% name)
self._hparam_types[name] = (type(value[0]), True)
else:
self._hparam_types[name] = (type(value), False)
setattr(self, name, value)
def parse(self, values):
"""Override hyperparameter values, parsing new values from a string.
See parse_values for more detail on the allowed format for values.
Args:
values: String. Comma separated list of `name=value` pairs where
'value' must follow the syntax described above.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values` cannot be parsed.
"""
type_map = dict()
for name, t in self._hparam_types.items():
param_type, _ = t
type_map[name] = param_type
values_map = parse_values(values, type_map)
return self._set_from_map(values_map)
def _set_from_map(self, values_map):
"""Override hyperparameter values, parsing new values from a dictionary.
Args:
values_map: Dictionary of name:value pairs.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values_map` cannot be parsed.
"""
for name, value in values_map.items():
_, is_list = self._hparam_types[name]
if isinstance(value, list):
if not is_list:
raise ValueError(
'Must not pass a list for single-valued parameter: %s' % name)
setattr(self, name, value)
else:
if is_list:
raise ValueError(
'Must pass a list for multi-valued parameter: %s.' % name)
setattr(self, name, value)
return self
def set_model_structure(self, model_structure):
self._model_structure = model_structure
def get_model_structure(self):
return self._model_structure
def to_json(self):
"""Serializes the hyperparameters into JSON.
Returns:
A JSON string.
"""
return json.dumps(self.values())
def parse_json(self, values_json):
"""Override hyperparameter values, parsing new values from a json object.
Args:
values_json: String containing a json object of name:value pairs.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values_json` cannot be parsed.
"""
values_map = json.loads(values_json)
return self._set_from_map(values_map)
def values(self):
"""Return the hyperparameter values as a Python dictionary.
Returns:
A dictionary with hyperparameter names as keys. The values are the
hyperparameter values.
"""
return {n: getattr(self, n) for n in self._hparam_types.keys()}
def __str__(self):
return str(sorted(self.values().items()))
@staticmethod
def _get_kind_name(param_type, is_list):
"""Returns the field name given parameter type and is_list.
Args:
param_type: Data type of the hparam.
is_list: Whether this is a list.
Returns:
A string representation of the field name.
Raises:
ValueError: If parameter type is not recognized.
"""
if issubclass(param_type, bool):
# This check must happen before issubclass(param_type, six.integer_types),
# since Python considers bool to be a subclass of int.
typename = 'bool'
elif issubclass(param_type, six.integer_types):
# Setting 'int' and 'long' types to be 'int64' to ensure the type is
# compatible with both Python2 and Python3.
typename = 'int64'
elif issubclass(param_type, (six.string_types, six.binary_type)):
# Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is
# compatible with both Python2 and Python3.
typename = 'bytes'
elif issubclass(param_type, float):
typename = 'float'
else:
raise ValueError('Unsupported parameter type: %s' % str(param_type))
suffix = 'list' if is_list else 'value'
return '_'.join([typename, suffix])
def to_proto(self, export_scope=None): # pylint: disable=unused-argument
"""Converts a `HParams` object to a `HParamDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `HParamDef` protocol buffer.
"""
hparam_proto = hparam_pb2.HParamDef()
for name in self._hparam_types:
# Parse the values.
param_type, is_list = self._hparam_types.get(name, (None, None))
kind = HParams._get_kind_name(param_type, is_list)
if is_list:
if kind.startswith('bytes'):
v_list = [compat.as_bytes(v) for v in getattr(self, name)]
else:
v_list = [v for v in getattr(self, name)]
getattr(hparam_proto.hparam[name], kind).value.extend(v_list)
else:
v = getattr(self, name)
if kind.startswith('bytes'):
v = compat.as_bytes(getattr(self, name))
setattr(hparam_proto.hparam[name], kind, v)
return hparam_proto
@staticmethod
def from_proto(hparam_def, import_scope=None): # pylint: disable=unused-argument
return HParams(hparam_def=hparam_def)
ops.register_proto_function('hparams',
proto_type=hparam_pb2.HParamDef,
to_proto=HParams.to_proto,
from_proto=HParams.from_proto)
|
{
"content_hash": "88eebe168cebb8d5a53e999c2540c4a4",
"timestamp": "",
"source": "github",
"line_count": 452,
"max_line_length": 83,
"avg_line_length": 35.45796460176991,
"alnum_prop": 0.6512135770886629,
"repo_name": "markslwong/tensorflow",
"id": "1d1778282072b6aacb26807bf2732c3f6f91202d",
"size": "16717",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/training/python/training/hparam.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "182510"
},
{
"name": "C++",
"bytes": "23696056"
},
{
"name": "CMake",
"bytes": "158237"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "824401"
},
{
"name": "HTML",
"bytes": "886772"
},
{
"name": "Java",
"bytes": "286562"
},
{
"name": "JavaScript",
"bytes": "14005"
},
{
"name": "Jupyter Notebook",
"bytes": "1833654"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37302"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "214419"
},
{
"name": "Python",
"bytes": "20624231"
},
{
"name": "Shell",
"bytes": "337420"
},
{
"name": "TypeScript",
"bytes": "1267602"
}
],
"symlink_target": ""
}
|
from django.forms import DateTimeField, ModelForm, Textarea
from .models import Location, Room, Schedule, Alert
class ScheduleForm(ModelForm):
    start = DateTimeField(input_formats=['%d/%m/%Y %H:%M'])
    end = DateTimeField(input_formats=['%d/%m/%Y %H:%M'])

    class Meta:
        model = Schedule
        fields = "__all__"
        exclude = ['active', 'scheduled']
        # `widgets` must live inside Meta for ModelForm to apply it;
        # a class-level `widgets` attribute is silently ignored.
        widgets = {
            'room': Textarea(attrs={'cols': 60, 'rows': 1}),
        }
|
{
"content_hash": "293c44afc751c2b863f5b4b397d5fc80",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 61,
"avg_line_length": 27,
"alnum_prop": 0.5969498910675382,
"repo_name": "maurobaraldi/brms",
"id": "f7eb7d270530b0bda2e837a225ebba8c216675a3",
"size": "459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brms/apps/rooms/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "93985"
},
{
"name": "HTML",
"bytes": "28070"
},
{
"name": "JavaScript",
"bytes": "8975"
},
{
"name": "Python",
"bytes": "20486"
}
],
"symlink_target": ""
}
|
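Each entry in `input_formats` on the form above is a `strptime` pattern, so `'%d/%m/%Y %H:%M'` restricts the field to day-first timestamps. The parsing itself is just the standard library; the sample string here is arbitrary.

from datetime import datetime

# The same pattern ScheduleForm passes to DateTimeField(input_formats=...).
dt = datetime.strptime('31/12/2024 23:59', '%d/%m/%Y %H:%M')
assert (dt.day, dt.month, dt.year, dt.hour, dt.minute) == (31, 12, 2024, 23, 59)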
import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
test.support.import_module('threading')
from test.script_helper import assert_python_ok
import sys
import threading
import time
import unittest
import weakref
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
from concurrent.futures.process import BrokenProcessPool
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
return x * y
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
class MyObject(object):
def my_method(self):
pass
class ExecutorMixin:
worker_count = 5
def setUp(self):
self.t1 = time.time()
try:
self.executor = self.executor_type(max_workers=self.worker_count)
except NotImplementedError as e:
self.skipTest(str(e))
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
dt = time.time() - self.t1
if test.support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 60, "synchronization issue: test lasted too long")
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
class ExecutorShutdownTest:
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
t = {executor_type}(5)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, unittest.TestCase):
def _prime_executor(self):
pass
def test_threads_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._threads), 3)
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest, unittest.TestCase):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
del executor
queue_management_thread.join()
for p in processes.values():
p.join()
class WaitTests:
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, unittest.TestCase):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, unittest.TestCase):
pass
class AsCompletedTests:
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
future1 = self.executor.submit(time.sleep, 2)
completed = [f for f in futures.as_completed([future1,future1])]
self.assertEqual(len(completed), 1)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
@test.support.cpython_only
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
my_object = MyObject()
my_object_collected = threading.Event()
my_object_callback = weakref.ref(
my_object, lambda obj: my_object_collected.set())
# Deliberately discarding the future.
self.executor.submit(my_object.my_method)
del my_object
collected = my_object_collected.wait(timeout=5.0)
self.assertTrue(collected,
"Stale reference not collected within timeout.")
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, unittest.TestCase):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest, unittest.TestCase):
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
class FutureTests(unittest.TestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with test.support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
# TODO(brian@sweetapp.com): This test is timing dependant.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
def test_result_with_cancel(self):
# TODO(brian@sweetapp.com): This test is timing dependant.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
@test.support.reap_threads
def test_main():
try:
test.support.run_unittest(__name__)
finally:
test.support.reap_children()
if __name__ == "__main__":
test_main()
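The WaitTests above run `futures.wait` against live executors. A compact standalone illustration of the FIRST_COMPLETED policy; the timings are arbitrary, and the fast future wins the race in practice, though that is not strictly guaranteed.

import time
from concurrent import futures

with futures.ThreadPoolExecutor(max_workers=2) as pool:
    fast = pool.submit(pow, 2, 8)        # returns almost immediately
    slow = pool.submit(time.sleep, 1.0)  # still running when wait returns
    done, not_done = futures.wait(
        [fast, slow], return_when=futures.FIRST_COMPLETED)
    assert fast in done and slow in not_done
    assert fast.result() == 256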
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
def test_result_with_cancel(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
@test.support.reap_threads
def test_main():
try:
test.support.run_unittest(__name__)
finally:
test.support.reap_children()
if __name__ == "__main__":
test_main()
|
{
"content_hash": "fab57802ba2271bc2c33eadb7fac66c6",
"timestamp": "",
"source": "github",
"line_count": 2061,
"max_line_length": 89,
"avg_line_length": 34.752062105773895,
"alnum_prop": 0.6058025242935329,
"repo_name": "ArcherSys/ArcherSys",
"id": "35518eaa536e864afc54d00f7ab56fdff94ffead",
"size": "71624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/test/test_concurrent_futures.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""Main entry point into the EC2 Credentials service.
This service allows the creation of access/secret credentials used for
the ec2 interop layer of OpenStack.
A user can create as many access/secret pairs as desired, each of which
maps to a specific tenant. This is required because OpenStack supports a
user belonging to multiple tenants, whereas the signatures created on
ec2-style requests don't allow specification of which tenant the user
wishes to act upon.
To complete the cycle, we provide a method that OpenStack services can
use to validate a signature and get a corresponding OpenStack token. This
token allows method calls to other services within the context the
access/secret was created. As an example, nova requests keystone to validate
the signature of a request, receives a token, and then makes a request to
glance to list images needed to perform the requested task.
"""
import uuid
from keystone import catalog
from keystone.common import manager
from keystone.common import utils
from keystone.common import wsgi
from keystone import config
from keystone import exception
from keystone import identity
from keystone import policy
from keystone import service
from keystone import token
CONF = config.CONF
class Manager(manager.Manager):
"""Default pivot point for the EC2 Credentials backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
def __init__(self):
super(Manager, self).__init__(CONF.ec2.driver)
class Ec2Extension(wsgi.ExtensionRouter):
def add_routes(self, mapper):
ec2_controller = Ec2Controller()
# validation
mapper.connect(
'/ec2tokens',
controller=ec2_controller,
action='authenticate',
conditions=dict(method=['POST']))
# crud
mapper.connect(
'/users/{user_id}/credentials/OS-EC2',
controller=ec2_controller,
action='create_credential',
conditions=dict(method=['POST']))
mapper.connect(
'/users/{user_id}/credentials/OS-EC2',
controller=ec2_controller,
action='get_credentials',
conditions=dict(method=['GET']))
mapper.connect(
'/users/{user_id}/credentials/OS-EC2/{credential_id}',
controller=ec2_controller,
action='get_credential',
conditions=dict(method=['GET']))
mapper.connect(
'/users/{user_id}/credentials/OS-EC2/{credential_id}',
controller=ec2_controller,
action='delete_credential',
conditions=dict(method=['DELETE']))
class Ec2Controller(wsgi.Application):
def __init__(self):
self.catalog_api = catalog.Manager()
self.identity_api = identity.Manager()
self.token_api = token.Manager()
self.policy_api = policy.Manager()
self.ec2_api = Manager()
super(Ec2Controller, self).__init__()
def check_signature(self, creds_ref, credentials):
signer = utils.Ec2Signer(creds_ref['secret'])
signature = signer.generate(credentials)
if utils.auth_str_equal(credentials['signature'], signature):
return
# NOTE(vish): Some libraries don't use the port when signing
# requests, so try again without port.
elif ':' in credentials['signature']:
hostname, _port = credentials['host'].split(':')
credentials['host'] = hostname
signature = signer.generate(credentials)
            if not utils.auth_str_equal(credentials['signature'], signature):
raise exception.Unauthorized(message='Invalid EC2 signature.')
else:
raise exception.Unauthorized(message='EC2 signature not supplied.')
def authenticate(self, context, credentials=None, ec2Credentials=None):
"""Validate a signed EC2 request and provide a token.
Other services (such as Nova) use this **admin** call to determine
if a request they signed received is from a valid user.
        If it is a valid signature, an OpenStack token that maps
to the user/tenant is returned to the caller, along with
all the other details returned from a normal token validation
call.
The returned token is useful for making calls to other
OpenStack services within the context of the request.
:param context: standard context
:param credentials: dict of ec2 signature
:param ec2Credentials: DEPRECATED dict of ec2 signature
:returns: token: openstack token equivalent to access key along
with the corresponding service catalog and roles
"""
# FIXME(ja): validate that a service token was used!
# NOTE(termie): backwards compat hack
if not credentials and ec2Credentials:
credentials = ec2Credentials
        if 'access' not in credentials:
raise exception.Unauthorized(message='EC2 signature not supplied.')
creds_ref = self._get_credentials(context,
credentials['access'])
self.check_signature(creds_ref, credentials)
# TODO(termie): don't create new tokens every time
# TODO(termie): this is copied from TokenController.authenticate
token_id = uuid.uuid4().hex
tenant_ref = self.identity_api.get_tenant(
context=context,
tenant_id=creds_ref['tenant_id'])
user_ref = self.identity_api.get_user(
context=context,
user_id=creds_ref['user_id'])
metadata_ref = self.identity_api.get_metadata(
context=context,
user_id=user_ref['id'],
tenant_id=tenant_ref['id'])
catalog_ref = self.catalog_api.get_catalog(
context=context,
user_id=user_ref['id'],
tenant_id=tenant_ref['id'],
metadata=metadata_ref)
token_ref = self.token_api.create_token(
context, token_id, dict(id=token_id,
user=user_ref,
tenant=tenant_ref,
metadata=metadata_ref))
        # TODO(termie): optimize this call at some point and put it into
        #               the return for metadata
# fill out the roles in the metadata
roles_ref = []
for role_id in metadata_ref.get('roles', []):
roles_ref.append(self.identity_api.get_role(context, role_id))
# TODO(termie): make this a util function or something
# TODO(termie): i don't think the ec2 middleware currently expects a
# full return, but it contains a note saying that it
# would be better to expect a full return
token_controller = service.TokenController()
return token_controller._format_authenticate(
token_ref, roles_ref, catalog_ref)
def create_credential(self, context, user_id, tenant_id):
"""Create a secret/access pair for use with ec2 style auth.
        Generates a new set of credentials that map to the user/tenant
pair.
:param context: standard context
:param user_id: id of user
:param tenant_id: id of tenant
:returns: credential: dict of ec2 credential
"""
if not self._is_admin(context):
self._assert_identity(context, user_id)
self._assert_valid_user_id(context, user_id)
self._assert_valid_tenant_id(context, tenant_id)
cred_ref = {'user_id': user_id,
'tenant_id': tenant_id,
'access': uuid.uuid4().hex,
'secret': uuid.uuid4().hex}
self.ec2_api.create_credential(context, cred_ref['access'], cred_ref)
return {'credential': cred_ref}
def get_credentials(self, context, user_id):
"""List all credentials for a user.
:param context: standard context
:param user_id: id of user
:returns: credentials: list of ec2 credential dicts
"""
if not self._is_admin(context):
self._assert_identity(context, user_id)
self._assert_valid_user_id(context, user_id)
return {'credentials': self.ec2_api.list_credentials(context, user_id)}
def get_credential(self, context, user_id, credential_id):
"""Retreive a user's access/secret pair by the access key.
Grab the full access/secret pair for a given access key.
:param context: standard context
:param user_id: id of user
:param credential_id: access key for credentials
:returns: credential: dict of ec2 credential
"""
if not self._is_admin(context):
self._assert_identity(context, user_id)
self._assert_valid_user_id(context, user_id)
creds = self._get_credentials(context, credential_id)
return {'credential': creds}
def delete_credential(self, context, user_id, credential_id):
"""Delete a user's access/secret pair.
        Used to revoke a user's access/secret pair.
:param context: standard context
:param user_id: id of user
:param credential_id: access key for credentials
:returns: bool: success
"""
if not self._is_admin(context):
self._assert_identity(context, user_id)
self._assert_owner(context, user_id, credential_id)
self._assert_valid_user_id(context, user_id)
self._get_credentials(context, credential_id)
return self.ec2_api.delete_credential(context, credential_id)
def _get_credentials(self, context, credential_id):
"""Return credentials from an ID.
:param context: standard context
:param credential_id: id of credential
:raises exception.Unauthorized: when credential id is invalid
:returns: credential: dict of ec2 credential.
"""
creds = self.ec2_api.get_credential(context,
credential_id)
if not creds:
raise exception.Unauthorized(message='EC2 access key not found.')
return creds
def _assert_identity(self, context, user_id):
"""Check that the provided token belongs to the user.
:param context: standard context
:param user_id: id of user
:raises exception.Forbidden: when token is invalid
"""
try:
token_ref = self.token_api.get_token(
context=context,
token_id=context['token_id'])
except exception.TokenNotFound:
raise exception.Unauthorized()
token_user_id = token_ref['user'].get('id')
        if token_user_id != user_id:
raise exception.Forbidden()
def _is_admin(self, context):
"""Wrap admin assertion error return statement.
:param context: standard context
:returns: bool: success
"""
try:
self.assert_admin(context)
return True
except exception.Forbidden:
return False
def _assert_owner(self, context, user_id, credential_id):
"""Ensure the provided user owns the credential.
:param context: standard context
:param user_id: expected credential owner
:param credential_id: id of credential object
:raises exception.Forbidden: on failure
"""
cred_ref = self.ec2_api.get_credential(context, credential_id)
        if user_id != cred_ref['user_id']:
raise exception.Forbidden()
def _assert_valid_user_id(self, context, user_id):
"""Ensure a valid user id.
:param context: standard context
        :param user_id: id of user to validate
:raises exception.UserNotFound: on failure
"""
user_ref = self.identity_api.get_user(
context=context,
user_id=user_id)
if not user_ref:
raise exception.UserNotFound(user_id=user_id)
def _assert_valid_tenant_id(self, context, tenant_id):
"""Ensure a valid tenant id.
:param context: standard context
        :param tenant_id: id of tenant to validate
        :raises exception.TenantNotFound: on failure
"""
tenant_ref = self.identity_api.get_tenant(
context=context,
tenant_id=tenant_id)
if not tenant_ref:
raise exception.TenantNotFound(tenant_id=tenant_id)
|
{
"content_hash": "3873dada0d200fe2ce0fa727d92997c2",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 79,
"avg_line_length": 37.461309523809526,
"alnum_prop": 0.6211170255025026,
"repo_name": "tylertian/Openstack",
"id": "5bf40103b1b7a3a7eed9690c43806ecf30e56363",
"size": "13211",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "openstack F/keystone/keystone/contrib/ec2/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "239919"
},
{
"name": "JavaScript",
"bytes": "156942"
},
{
"name": "Python",
"bytes": "16949418"
},
{
"name": "Shell",
"bytes": "96743"
}
],
"symlink_target": ""
}
|
"""Simple commmandline utility that prints all Jupyter paths"""
from jupyter_core import paths
def printVars(names):
"""Pretty print path vars by name
Parameters
----------
names list(str) - variable names"""
# Calculate the left column size
leftCol = max([len(name) for name in names]) + 1
space = ' ' * leftCol
# Print each var
for name in names:
# If the var is actually a method, invoke it.
values = getattr(paths, name)
if callable(values):
values = values()
# If this is a list, print the var name only on the first row.
if isinstance(values, list):
values = [str(value) for value in values]
print(name + (' ' * (leftCol - len(name))) + values[0])
# Followed by left column padding and the rest of the list
if len(values) > 1:
for value in values[1:]:
print(space + value)
# If it's not a list, print the var name and var value.
else:
print(name + (' ' * (leftCol - len(name))) + str(values))
# Print the most important variables first
print('Paths\n-----')
printVars([
'jupyter_config_dir',
'jupyter_config_path',
'jupyter_data_dir',
'jupyter_path',
'jupyter_runtime_dir'
])
# Print the variables used to calculate other variables second.
print('\n\nInternals\n---------')
printVars([
'ENV_CONFIG_PATH',
'ENV_JUPYTER_PATH',
'SYSTEM_CONFIG_PATH',
'SYSTEM_JUPYTER_PATH'
])
|
{
"content_hash": "b837fc0d718110e097572350a1ad5f23",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 70,
"avg_line_length": 28.745454545454546,
"alnum_prop": 0.5641998734977862,
"repo_name": "jovyan/jpypaths",
"id": "532491776352d9c72a00fc4c99c87c6afe7674f9",
"size": "1581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jpypaths.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1581"
}
],
"symlink_target": ""
}
|
from pybuilder.core import task, depends, description, use_plugin
use_plugin("core")
@task
@description("Execute analysis plugins.")
@depends("run_unit_tests")
def analyze():
pass
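# A minimal sketch (an assumption, not part of pybuilder itself) of how a
# downstream task can chain onto this extension point: declaring
# @depends("analyze") makes the hypothetical "package" task run only after
# all analysis plugins have finished.
#
#     @task
#     @depends("analyze")
#     def package(project, logger):
#         logger.info("Packaging %s", project.name)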
|
{
"content_hash": "6a895c6b0fd4f4c08a268946f508b42e",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 65,
"avg_line_length": 18.7,
"alnum_prop": 0.732620320855615,
"repo_name": "Danielweber7624/pybuilder",
"id": "ed8201c8c073dac236a18e53b385a589c35be281",
"size": "856",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "src/main/python/pybuilder/plugins/analysis_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "2886"
},
{
"name": "Python",
"bytes": "473103"
}
],
"symlink_target": ""
}
|
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Jetduino for the Jetson TK1/TX1: an open source platform for connecting
Grove Sensors to the Jetson embedded supercomputers.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
#################################################################################################################################################
# NOTE:
# The software for this sensor is still in development and might make your GrovePi unusable as long as this sensor is connected to the GrovePi
#################################################################################################################################################
import time, sys
import smbus
#bus = smbus.SMBus(0) #GEN1_I2C
bus = smbus.SMBus(1) #GEN2_I2C
#bus = smbus.SMBus(4) #PWR_I2C
class grove_fingerclip_heart_sensor:
    address = 0x50
    def pulse_read(self):
        # Read a single byte (the pulse value) from the sensor over I2C.
        print(bus.read_byte(0x50))
        # return bus.read_i2c_block_data(self.address, 1,1)
if __name__ == "__main__":
pulse= grove_fingerclip_heart_sensor()
while True:
try:
pulse.pulse_read()
except IOError:
print "Error"
time.sleep(.5)
|
{
"content_hash": "81648757be4784860d113f2db305b7a5",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 145,
"avg_line_length": 39.82142857142857,
"alnum_prop": 0.668609865470852,
"repo_name": "NeuroRoboticTech/Jetduino",
"id": "bde7bb7e10b5decf50b3c38c81686527419f9123",
"size": "2675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Software/Python/grove_fingerclip_heart_sensor/grove_fingerclip_heart_sensor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "37042"
},
{
"name": "C",
"bytes": "38867"
},
{
"name": "C#",
"bytes": "33014"
},
{
"name": "C++",
"bytes": "101883"
},
{
"name": "CMake",
"bytes": "3553"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "JavaScript",
"bytes": "30142"
},
{
"name": "Python",
"bytes": "568027"
},
{
"name": "Shell",
"bytes": "17661"
}
],
"symlink_target": ""
}
|
"""
Goal Sentry API
Models
"""
from sqlalchemy import Column, Integer, String, ForeignKey, DateTime
from datetime import datetime as dt
from sqlalchemy.orm import relationship
from database import Base
class User(Base):
__tablename__ = 'users'
# Basic metadata
id = Column(Integer, primary_key=True)
username = Column(String(50), unique=True)
name = Column(String(120))
email = Column(String(120))
rank = Column(Integer)
# Create a one-to-many relationship with Score
scores = relationship("Score")
def __init__(self, username=None, name=None, email=None):
self.username = username
self.name = name
        self.email = email.lower() if email else None
self.rank = 0
def __repr__(self):
return '<User %r>' % self.username
class Table(Base):
__tablename__ = 'tables'
# Basic metadata
id = Column(Integer, primary_key=True)
name = Column(String(120))
# Create a one-to-many relationship with Game
games = relationship("Game")
def __init__(self, name=None):
self.name = name
def __repr__(self):
return '<Table %r>' % self.id
class Game(Base):
__tablename__ = 'games'
# Basic metadata
id = Column(Integer, primary_key=True)
time_started = Column(DateTime)
time_completed = Column(DateTime)
# Create a one-to-many relationship with Score
scores = relationship("Score")
# Create a many-to-one relationship with Table
table_id = Column(Integer, ForeignKey('tables.id'))
def __init__(self, time_started=None, table_id=None):
if time_started:
            # Parse the supplied timestamp string
self.time_started = dt.strptime(time_started, "%Y-%m-%d %H:%M:%S.%f")
else:
# Store the current time
self.time_started = dt.now()
self.time_completed = None
if table_id:
self.table_id = table_id
def __repr__(self):
return '<Game %r>' % self.id
class Score(Base):
__tablename__ = 'scores'
# Basic metadata
id = Column(Integer, primary_key=True)
score = Column(Integer)
# Create a one-to-many relationship with User
user_id = Column(Integer, ForeignKey('users.id'))
# Create a one-to-many relationship with Game
game_id = Column(Integer, ForeignKey('games.id'))
def __init__(self, score=0, user_id=None, game_id=None):
self.score = score
if user_id:
self.user_id = user_id
if game_id:
self.game_id = game_id
def __repr__(self):
return '<Score %r>' % self.id
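# A minimal usage sketch, assuming Base is bound to an engine and a session
# named db_session exists (both would come from the `database` module and are
# assumptions here; only the models above are defined in this file):
#
#     user = User(username='alice', name='Alice', email='ALICE@example.com')
#     db_session.add(user)
#     db_session.commit()
#     game = Game(table_id=1)
#     game.scores.append(Score(score=10, user_id=user.id))
#     db_session.add(game)
#     db_session.commit()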
|
{
"content_hash": "08b05ce6671c04b01413b01efe4ed004",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 81,
"avg_line_length": 24.695238095238096,
"alnum_prop": 0.6058619359814886,
"repo_name": "stevenmirabito/GoalSentry",
"id": "1b92526dfe92d57a7087eba62bb628488dcdb303",
"size": "2593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/goalsentry/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "932"
},
{
"name": "HTML",
"bytes": "13558"
},
{
"name": "JavaScript",
"bytes": "9759"
},
{
"name": "Python",
"bytes": "10150"
},
{
"name": "Shell",
"bytes": "219"
}
],
"symlink_target": ""
}
|
import pygame as pg
# import math
import random
def interpolate(v1, v2, range):
    # Linear interpolation between vectors v1 and v2; range runs from 0 to 1.
    return pg.math.Vector2(v1.x + (v2.x - v1.x) * range,
                           v1.y + (v2.y - v1.y) * range)
class Particle(pg.sprite.Sprite):
def __init__(self, game, image, pos, vel, life, lifetime,
fade_start, dorotate):
pg.sprite.Sprite.__init__(self)
self.game = game
self.pos = pos
self.vel = vel
self.rot_cache = {}
self.base_image = image
        self.dorotate = dorotate
        self.rot = 0  # rotation angle; only consulted when dorotate is True
        if dorotate:
            self.image = pg.transform.rotate(self.base_image, -self.rot)
        else:
            self.image = self.base_image.copy()
self.rect = self.image.get_rect()
self.lifetime = lifetime
self.life = life
self.fade_start = fade_start
self.duration = lifetime - fade_start
self.update()
def update(self):
# if self.dorotate:
# old_center = self.rect.center
# if self.rot in self.rot_cache:
# self.image = self.rot_cache[self.rot]
# else:
# self.image = pg.transform.rotate(self.base_image, -self.rot)
# self.rot_cache[self.rot] = self.image
# self.rect = self.image.get_rect()
# self.rect.center = old_center
self.life += self.game.dt
self.fade()
self.pos += self.vel
self.rect.centerx = self.pos.x
self.rect.centery = self.pos.y
def blit(self):
        # additive blending makes overlapping particles glow
        flags = pg.BLEND_RGBA_ADD
return self.game.game_surface.blit(self.image, self.rect, special_flags=flags)
def fade(self):
if self.life > self.fade_start:
try:
ratio = (self.life - self.fade_start) / self.duration
except ZeroDivisionError:
ratio = 1.0
if ratio > 1.0:
ratio = 1.0
mask = int(255 * (1 - ratio))
self.image.fill([mask, mask, mask], special_flags=pg.BLEND_MIN)
def is_dead(self):
if self.life > self.lifetime:
return True
return False
class ParticleEmitter:
def __init__(self, game, parent, offset, vel, image, count, lifetime,
fade_start, size, angle_range, dorotate=False):
self.game = game
self.parent = parent
self.offset = offset
self.particle_vel = vel
self.pos = self.parent.pos + self.offset.rotate(self.parent.rot)
self.base_image = image
self.size = size
self.angle_range = angle_range
self.image = pg.transform.scale(self.base_image, (self.size, self.size))
self.count = count
self.lifetime = lifetime
self.fade_start = fade_start
self.particles = []
self.timer = 0
self.prevcurve = [self.pos for x in range(3)]
self.active = True
def print_state(self):
print("c:{}, p:{}".format(self.count, len(self.particles)))
def update(self):
self.pos = self.parent.pos + self.offset.rotate(-self.parent.rot)
self.rand_angle = random.randint(-self.angle_range, self.angle_range)
# update all particles
for part in self.particles:
part.update()
if part.is_dead():
self.particles.remove(part)
# print("p.kill")
# create a new particle
if self.count != 0 and self.active:
self.timer += self.game.dt
newparticles = self.count * self.timer
if newparticles > 1:
for i in range(int(newparticles)):
t = i / newparticles
time_elapsed = (1.0 - t) * self.game.dt
vel = self.particle_vel.rotate(-self.parent.rot + self.rand_angle)
pos = interpolate(self.prevcurve[0], self.pos, t)
pos += (self.parent.vel + vel) * time_elapsed
# pos += vel * time_elapsed
init_life = time_elapsed
self.timer = 0
# print("new part: pos: {} vel: {}".format(pos, vel))
self.particles.append(Particle(self.game, self.image, pos,
vel, init_life, self.lifetime,
self.fade_start, False))
self.prevcurve[2] = self.prevcurve[1]
self.prevcurve[1] = self.prevcurve[0]
self.prevcurve[0] = self.pos
def draw(self):
rects = []
for part in self.particles:
rects.append(part.blit())
return rects
def kill_all(self):
self.count = 0
self.active = False
self.particles = []
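# A minimal usage sketch (names are assumptions: `game` must expose `dt` and
# `game_surface`, and `ship` must expose `pos`, `rot`, and `vel`, exactly as
# the emitter uses them above):
#
#     exhaust = ParticleEmitter(game, ship,
#                               offset=pg.math.Vector2(-12, 0),
#                               vel=pg.math.Vector2(-3, 0),
#                               image=flame_img, count=60, lifetime=0.8,
#                               fade_start=0.3, size=16, angle_range=10)
#     # once per frame:
#     exhaust.update()
#     dirty_rects = exhaust.draw()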
|
{
"content_hash": "8831758a57a97f6a9cb26c4a35f2dae5",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 86,
"avg_line_length": 35.43703703703704,
"alnum_prop": 0.5265468227424749,
"repo_name": "kidscancode/gamedev",
"id": "6bc34d2d5dd362761710e040b95d38349669c220",
"size": "4784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asteroids/particles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1690942"
}
],
"symlink_target": ""
}
|
import os
from django.conf import settings
CAPTCHA_FONT_PATH = getattr(settings, 'CAPTCHA_FONT_PATH', os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'fonts/Vera.ttf')))
CAPTCHA_FONT_SIZE = getattr(settings, 'CAPTCHA_FONT_SIZE', 22)
CAPTCHA_LETTER_ROTATION = getattr(settings, 'CAPTCHA_LETTER_ROTATION', (-35, 35))
CAPTCHA_BACKGROUND_COLOR = getattr(settings, 'CAPTCHA_BACKGROUND_COLOR', '#ffffff')
CAPTCHA_FOREGROUND_COLOR = getattr(settings, 'CAPTCHA_FOREGROUND_COLOR', '#001100')
CAPTCHA_CHALLENGE_FUNCT = getattr(settings, 'CAPTCHA_CHALLENGE_FUNCT', 'captcha.helpers.random_char_challenge')
CAPTCHA_NOISE_FUNCTIONS = getattr(settings, 'CAPTCHA_NOISE_FUNCTIONS', ('captcha.helpers.noise_arcs', 'captcha.helpers.noise_dots',))
CAPTCHA_FILTER_FUNCTIONS = getattr(settings, 'CAPTCHA_FILTER_FUNCTIONS', ('captcha.helpers.post_smooth',))
CAPTCHA_WORDS_DICTIONARY = getattr(settings, 'CAPTCHA_WORDS_DICTIONARY', '/usr/share/dict/words')
CAPTCHA_PUNCTUATION = getattr(settings, 'CAPTCHA_PUNCTUATION', '''_"',.;:-''')
CAPTCHA_FLITE_PATH = getattr(settings, 'CAPTCHA_FLITE_PATH', None)
CAPTCHA_TIMEOUT = getattr(settings, 'CAPTCHA_TIMEOUT', 5) # Minutes
CAPTCHA_LENGTH = int(getattr(settings, 'CAPTCHA_LENGTH', 4)) # Chars
CAPTCHA_IMAGE_BEFORE_FIELD = getattr(settings, 'CAPTCHA_IMAGE_BEFORE_FIELD', True)
CAPTCHA_DICTIONARY_MIN_LENGTH = getattr(settings, 'CAPTCHA_DICTIONARY_MIN_LENGTH', 0)
CAPTCHA_DICTIONARY_MAX_LENGTH = getattr(settings, 'CAPTCHA_DICTIONARY_MAX_LENGTH', 99)
if CAPTCHA_IMAGE_BEFORE_FIELD:
CAPTCHA_OUTPUT_FORMAT = getattr(settings, 'CAPTCHA_OUTPUT_FORMAT', '%(image)s %(hidden_field)s %(text_field)s')
else:
CAPTCHA_OUTPUT_FORMAT = getattr(settings, 'CAPTCHA_OUTPUT_FORMAT', '%(hidden_field)s %(text_field)s %(image)s')
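# NOTE: the nested getattr below also honors the historical misspelling
# 'CATPCHA_TEST_MODE' for backwards compatibility.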
CAPTCHA_TEST_MODE = getattr(settings, 'CAPTCHA_TEST_MODE', getattr(settings, 'CATPCHA_TEST_MODE', False))
# Failsafe
if CAPTCHA_DICTIONARY_MIN_LENGTH > CAPTCHA_DICTIONARY_MAX_LENGTH:
CAPTCHA_DICTIONARY_MIN_LENGTH, CAPTCHA_DICTIONARY_MAX_LENGTH = CAPTCHA_DICTIONARY_MAX_LENGTH, CAPTCHA_DICTIONARY_MIN_LENGTH
def _callable_from_string(string_or_callable):
    # Accept either a callable or a dotted-path string such as
    # 'captcha.helpers.noise_arcs' and resolve it to the named attribute.
    if callable(string_or_callable):
        return string_or_callable
    module_name, _, attr_name = string_or_callable.rpartition('.')
    return getattr(__import__(module_name, {}, {}, ['']), attr_name)
def get_challenge():
return _callable_from_string(CAPTCHA_CHALLENGE_FUNCT)
def noise_functions():
if CAPTCHA_NOISE_FUNCTIONS:
return map(_callable_from_string, CAPTCHA_NOISE_FUNCTIONS)
return []
def filter_functions():
if CAPTCHA_FILTER_FUNCTIONS:
return map(_callable_from_string, CAPTCHA_FILTER_FUNCTIONS)
return []
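# A minimal sketch of overriding these defaults from a Django project's
# settings.py (values below are illustrative; anything left unset falls back
# to the defaults defined above):
#
#     CAPTCHA_LENGTH = 6
#     CAPTCHA_FONT_SIZE = 30
#     CAPTCHA_NOISE_FUNCTIONS = ('captcha.helpers.noise_dots',)
#     CAPTCHA_OUTPUT_FORMAT = '%(image)s %(text_field)s %(hidden_field)s'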
|
{
"content_hash": "34748cc08d3119be110da359ab7ff002",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 141,
"avg_line_length": 51.80769230769231,
"alnum_prop": 0.7323682256867112,
"repo_name": "madflow/seahub",
"id": "e344bd910799786f99b6d394da25c9e73bb4dc38",
"size": "2696",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "thirdpart/captcha/conf/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "318263"
},
{
"name": "HTML",
"bytes": "789775"
},
{
"name": "Java",
"bytes": "2137623"
},
{
"name": "JavaScript",
"bytes": "3054991"
},
{
"name": "Makefile",
"bytes": "1004"
},
{
"name": "PLSQL",
"bytes": "17176"
},
{
"name": "Python",
"bytes": "1709615"
},
{
"name": "Shell",
"bytes": "9695"
}
],
"symlink_target": ""
}
|
"""Support for Tellstick Net/Telstick Live sensors."""
from __future__ import annotations
from homeassistant.components import sensor, tellduslive
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription
from homeassistant.const import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
LENGTH_MILLIMETERS,
LIGHT_LUX,
PERCENTAGE,
POWER_WATT,
PRECIPITATION_MILLIMETERS_PER_HOUR,
SPEED_METERS_PER_SECOND,
TEMP_CELSIUS,
UV_INDEX,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .entry import TelldusLiveEntity
SENSOR_TYPE_TEMPERATURE = "temp"
SENSOR_TYPE_HUMIDITY = "humidity"
SENSOR_TYPE_RAINRATE = "rrate"
SENSOR_TYPE_RAINTOTAL = "rtot"
SENSOR_TYPE_WINDDIRECTION = "wdir"
SENSOR_TYPE_WINDAVERAGE = "wavg"
SENSOR_TYPE_WINDGUST = "wgust"
SENSOR_TYPE_UV = "uv"
SENSOR_TYPE_WATT = "watt"
SENSOR_TYPE_LUMINANCE = "lum"
SENSOR_TYPE_DEW_POINT = "dewp"
SENSOR_TYPE_BAROMETRIC_PRESSURE = "barpress"
SENSOR_TYPES: dict[str, SensorEntityDescription] = {
SENSOR_TYPE_TEMPERATURE: SensorEntityDescription(
key=SENSOR_TYPE_TEMPERATURE,
name="Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=DEVICE_CLASS_TEMPERATURE,
),
SENSOR_TYPE_HUMIDITY: SensorEntityDescription(
key=SENSOR_TYPE_HUMIDITY,
name="Humidity",
native_unit_of_measurement=PERCENTAGE,
device_class=DEVICE_CLASS_HUMIDITY,
),
SENSOR_TYPE_RAINRATE: SensorEntityDescription(
key=SENSOR_TYPE_RAINRATE,
name="Rain rate",
native_unit_of_measurement=PRECIPITATION_MILLIMETERS_PER_HOUR,
icon="mdi:water",
),
SENSOR_TYPE_RAINTOTAL: SensorEntityDescription(
key=SENSOR_TYPE_RAINTOTAL,
name="Rain total",
native_unit_of_measurement=LENGTH_MILLIMETERS,
icon="mdi:water",
),
SENSOR_TYPE_WINDDIRECTION: SensorEntityDescription(
key=SENSOR_TYPE_WINDDIRECTION,
name="Wind direction",
),
SENSOR_TYPE_WINDAVERAGE: SensorEntityDescription(
key=SENSOR_TYPE_WINDAVERAGE,
name="Wind average",
native_unit_of_measurement=SPEED_METERS_PER_SECOND,
),
SENSOR_TYPE_WINDGUST: SensorEntityDescription(
key=SENSOR_TYPE_WINDGUST,
name="Wind gust",
native_unit_of_measurement=SPEED_METERS_PER_SECOND,
),
SENSOR_TYPE_UV: SensorEntityDescription(
key=SENSOR_TYPE_UV,
name="UV",
native_unit_of_measurement=UV_INDEX,
),
SENSOR_TYPE_WATT: SensorEntityDescription(
key=SENSOR_TYPE_WATT,
name="Power",
native_unit_of_measurement=POWER_WATT,
),
SENSOR_TYPE_LUMINANCE: SensorEntityDescription(
key=SENSOR_TYPE_LUMINANCE,
name="Luminance",
native_unit_of_measurement=LIGHT_LUX,
device_class=DEVICE_CLASS_ILLUMINANCE,
),
SENSOR_TYPE_DEW_POINT: SensorEntityDescription(
key=SENSOR_TYPE_DEW_POINT,
name="Dew Point",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=DEVICE_CLASS_TEMPERATURE,
),
SENSOR_TYPE_BAROMETRIC_PRESSURE: SensorEntityDescription(
key=SENSOR_TYPE_BAROMETRIC_PRESSURE,
name="Barometric Pressure",
native_unit_of_measurement="kPa",
),
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up tellduslive sensors dynamically."""
async def async_discover_sensor(device_id):
"""Discover and add a discovered sensor."""
client = hass.data[tellduslive.DOMAIN]
async_add_entities([TelldusLiveSensor(client, device_id)])
async_dispatcher_connect(
hass,
tellduslive.TELLDUS_DISCOVERY_NEW.format(sensor.DOMAIN, tellduslive.DOMAIN),
async_discover_sensor,
)
class TelldusLiveSensor(TelldusLiveEntity, SensorEntity):
"""Representation of a Telldus Live sensor."""
def __init__(self, client, device_id):
"""Initialize TelldusLiveSensor."""
super().__init__(client, device_id)
if desc := SENSOR_TYPES.get(self._type):
self.entity_description = desc
@property
def device_id(self):
"""Return id of the device."""
return self._id[0]
@property
def _type(self):
"""Return the type of the sensor."""
return self._id[1]
@property
def _value(self):
"""Return value of the sensor."""
return self.device.value(*self._id[1:])
@property
def _value_as_temperature(self):
"""Return the value as temperature."""
return round(float(self._value), 1)
@property
def _value_as_luminance(self):
"""Return the value as luminance."""
return round(float(self._value), 1)
@property
def _value_as_humidity(self):
"""Return the value as humidity."""
return int(round(float(self._value)))
@property
def name(self):
"""Return the name of the sensor."""
quantity_name = (
self.entity_description.name if hasattr(self, "entity_description") else ""
)
return "{} {}".format(super().name, quantity_name or "").strip()
@property
def native_value(self):
"""Return the state of the sensor."""
if not self.available:
return None
if self._type == SENSOR_TYPE_TEMPERATURE:
return self._value_as_temperature
if self._type == SENSOR_TYPE_HUMIDITY:
return self._value_as_humidity
if self._type == SENSOR_TYPE_LUMINANCE:
return self._value_as_luminance
return self._value
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return "{}-{}-{}".format(*self._id)
|
{
"content_hash": "5c72b602e8fa99f2f8bfed7925be0ea2",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 87,
"avg_line_length": 31.617486338797814,
"alnum_prop": 0.6491531282405807,
"repo_name": "jawilson/home-assistant",
"id": "729b605250757f570ac3fe1bd51e60532949ae87",
"size": "5786",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "homeassistant/components/tellduslive/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pyOCD.target.target import Target
from pyOCD.target.target import TARGET_RUNNING, TARGET_HALTED
from pyOCD.transport.cmsis_dap import DP_REG
import logging
import struct
# Debug Halting Control and Status Register
DHCSR = 0xE000EDF0
# Debug Core Register Selector Register
DCRSR = 0xE000EDF4
REGWnR = (1 << 16)
# Debug Core Register Data Register
DCRDR = 0xE000EDF8
# Debug Exception and Monitor Control Register
DEMCR = 0xE000EDFC
TRACE_ENA = (1 << 24)
VC_HARDERR = (1 << 9)
VC_BUSERR = (1 << 8)
VC_CORERESET = (1 << 0)
# CPUID Register
CPUID = 0xE000ED00
# CPUID masks
CPUID_IMPLEMENTER_MASK = 0xff000000
CPUID_IMPLEMENTER_POS = 24
CPUID_VARIANT_MASK = 0x00f00000
CPUID_VARIANT_POS = 20
CPUID_ARCHITECTURE_MASK = 0x000f0000
CPUID_ARCHITECTURE_POS = 16
CPUID_PARTNO_MASK = 0x0000fff0
CPUID_PARTNO_POS = 4
CPUID_REVISION_MASK = 0x0000000f
CPUID_REVISION_POS = 0
CPUID_IMPLEMENTER_ARM = 0x41
ARMv6M = 0xC
ARMv7M = 0xF
# CPUID PARTNO values
ARM_CortexM0 = 0xC20
ARM_CortexM1 = 0xC21
ARM_CortexM3 = 0xC23
ARM_CortexM4 = 0xC24
ARM_CortexM0p = 0xC60
# User-friendly names for core types.
CORE_TYPE_NAME = {
ARM_CortexM0 : "Cortex-M0",
ARM_CortexM1 : "Cortex-M1",
ARM_CortexM3 : "Cortex-M3",
ARM_CortexM4 : "Cortex-M4",
ARM_CortexM0p : "Cortex-M0+"
}
# Coprocessor Access Control Register
CPACR = 0xE000ED88
CPACR_CP10_CP11_MASK = (3 << 20) | (3 << 22)
NVIC_AIRCR = (0xE000ED0C)
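# AIRCR writes are accepted only when VECTKEY (0x5FA) is written to the top
# halfword at the same time.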
NVIC_AIRCR_VECTKEY = (0x5FA << 16)
NVIC_AIRCR_VECTRESET = (1 << 0)
NVIC_AIRCR_SYSRESETREQ = (1 << 2)
CSYSPWRUPACK = 0x80000000
CDBGPWRUPACK = 0x20000000
CSYSPWRUPREQ = 0x40000000
CDBGPWRUPREQ = 0x10000000
TRNNORMAL = 0x00000000
MASKLANE = 0x00000f00
# DHCSR bit masks
C_DEBUGEN = (1 << 0)
C_HALT = (1 << 1)
C_STEP = (1 << 2)
C_MASKINTS = (1 << 3)
C_SNAPSTALL = (1 << 5)
S_REGRDY = (1 << 16)
S_HALT = (1 << 17)
S_SLEEP = (1 << 18)
S_LOCKUP = (1 << 19)
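# Writes to DHCSR are ignored unless DBGKEY is written to the top halfword at
# the same time.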
DBGKEY = (0xA05F << 16)
# FPB (breakpoint)
FP_CTRL = (0xE0002000)
FP_CTRL_KEY = (1 << 1)
FP_COMP0 = (0xE0002008)
# Map from register name to DCRSR register index.
#
# The CONTROL, FAULTMASK, BASEPRI, and PRIMASK registers are special in that they share the
# same DCRSR register index and are returned as a single value. In this dict, these registers
# have negative values to signal to the register read/write functions that special handling
# is necessary. The values are the byte number containing the register value, plus 1 and then
# negated. So -1 means a mask of 0xff, -2 is 0xff00, and so on. The actual DCRSR register index
# for these combined registers has the key of 'cfbp'.
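#
# Worked example (illustrative, not in the original comment): PRIMASK is -1,
# i.e. byte 0 of the combined value, so it is extracted as (cfbp >> 0) & 0xff;
# BASEPRI is -2, i.e. byte 1, extracted as (cfbp >> 8) & 0xff, matching the
# 0xff00 mask described above.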
CORE_REGISTER = {
'r0': 0,
'r1': 1,
'r2': 2,
'r3': 3,
'r4': 4,
'r5': 5,
'r6': 6,
'r7': 7,
'r8': 8,
'r9': 9,
'r10': 10,
'r11': 11,
'r12': 12,
'sp': 13,
'lr': 14,
'pc': 15,
'xpsr': 16,
'msp': 17,
'psp': 18,
'cfbp': 20,
'control': -4,
'faultmask': -3,
'basepri': -2,
'primask': -1,
'fpscr': 33,
's0': 128,
's1': 129,
's2': 130,
's3': 131,
's4': 132,
's5': 133,
's6': 134,
's7': 135,
's8': 136,
's9': 137,
's10': 138,
's11': 139,
's12': 140,
's13': 141,
's14': 142,
's15': 143,
's16': 144,
's17': 145,
's18': 146,
's19': 147,
's20': 148,
's21': 149,
's22': 150,
's23': 151,
's24': 152,
's25': 153,
's26': 154,
's27': 155,
's28': 156,
's29': 157,
's30': 158,
's31': 159,
}
"""
convert a byte array into a word array
"""
def byte2word(data):
res = []
for i in range(len(data)/4):
res.append(data[i*4 + 0] << 0 |
data[i*4 + 1] << 8 |
data[i*4 + 2] << 16 |
data[i*4 + 3] << 24)
return res
"""
convert a word array into a byte array
"""
def word2byte(data):
res = []
for x in data:
res.append((x >> 0) & 0xff)
res.append((x >> 8) & 0xff)
res.append((x >> 16) & 0xff)
res.append((x >> 24) & 0xff)
return res
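# Example (illustrative): the conversions are little-endian and round-trip:
#   byte2word([0x78, 0x56, 0x34, 0x12]) == [0x12345678]
#   word2byte([0x12345678]) == [0x78, 0x56, 0x34, 0x12]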
## @brief Convert a 32-bit int to an IEEE754 float.
def int2float(data):
d = struct.pack("@I", data)
return struct.unpack("@f", d)[0]
## @brief Convert an IEEE754 float to a 32-bit int.
def float2int(data):
d = struct.pack("@f", data)
return struct.unpack("@I", d)[0]
class Breakpoint(object):
def __init__(self, comp_register_addr):
self.comp_register_addr = comp_register_addr
self.enabled = False
self.addr = 0
class CortexM(Target):
"""
This class has basic functions to access a Cortex M core:
- init
- read/write memory
- read/write core registers
- set/remove hardware breakpoints
"""
targetXML = """<?xml version="1.0"?>
<!DOCTYPE feature SYSTEM "gdb-target.dtd">
<target>
<feature name="org.gnu.gdb.arm.m-profile">
<reg name="r0" bitsize="32"/>
<reg name="r1" bitsize="32"/>
<reg name="r2" bitsize="32"/>
<reg name="r3" bitsize="32"/>
<reg name="r4" bitsize="32"/>
<reg name="r5" bitsize="32"/>
<reg name="r6" bitsize="32"/>
<reg name="r7" bitsize="32"/>
<reg name="r8" bitsize="32"/>
<reg name="r9" bitsize="32"/>
<reg name="r10" bitsize="32"/>
<reg name="r11" bitsize="32"/>
<reg name="r12" bitsize="32"/>
<reg name="sp" bitsize="32" type="data_ptr"/>
<reg name="lr" bitsize="32"/>
<reg name="pc" bitsize="32" type="code_ptr"/>
<reg name="xpsr" bitsize="32" regnum="16"/>
</feature>
</target>
"""
def __init__(self, transport):
super(CortexM, self).__init__(transport)
self.auto_increment_page_size = 0
self.idcode = 0
self.breakpoints = []
self.nb_code = 0
self.nb_lit = 0
self.num_breakpoint_used = 0
self.fpb_enabled = False
self.arch = 0
self.core_type = 0
self.has_fpu = False
self.part_number = self.__class__.__name__
def init(self, setup_fpb = True):
"""
Cortex M initialization
"""
self.idcode = self.readIDCode()
# select bank 0 (to access DRW and TAR)
self.transport.writeDP(DP_REG['SELECT'], 0)
self.transport.writeDP(DP_REG['CTRL_STAT'], CSYSPWRUPREQ | CDBGPWRUPREQ)
while True:
r = self.transport.readDP(DP_REG['CTRL_STAT'])
if (r & (CDBGPWRUPACK | CSYSPWRUPACK)) == (CDBGPWRUPACK | CSYSPWRUPACK):
break
self.transport.writeDP(DP_REG['CTRL_STAT'], CSYSPWRUPREQ | CDBGPWRUPREQ | TRNNORMAL | MASKLANE)
self.transport.writeDP(DP_REG['SELECT'], 0)
if setup_fpb:
self.halt()
self.setupFPB()
self.readCoreType()
self.checkForFPU()
## @brief Read the CPUID register and determine core type.
def readCoreType(self):
# Read CPUID register
cpuid = self.read32(CPUID)
implementer = (cpuid & CPUID_IMPLEMENTER_MASK) >> CPUID_IMPLEMENTER_POS
if implementer != CPUID_IMPLEMENTER_ARM:
logging.warning("CPU implementer is not ARM!")
self.arch = (cpuid & CPUID_ARCHITECTURE_MASK) >> CPUID_ARCHITECTURE_POS
self.core_type = (cpuid & CPUID_PARTNO_MASK) >> CPUID_PARTNO_POS
logging.info("CPU core is %s", CORE_TYPE_NAME[self.core_type])
## @brief Determine if a Cortex-M4 has an FPU.
#
# The core type must have been identified prior to calling this function.
def checkForFPU(self):
if self.core_type != ARM_CortexM4:
self.has_fpu = False
return
originalCpacr = self.read32(CPACR)
cpacr = originalCpacr | CPACR_CP10_CP11_MASK
self.write32(CPACR, cpacr)
cpacr = self.read32(CPACR)
self.has_fpu = (cpacr & CPACR_CP10_CP11_MASK) != 0
# Restore previous value.
self.write32(CPACR, originalCpacr)
if self.has_fpu:
logging.info("FPU present")
def setupFPB(self):
"""
Reads the number of hardware breakpoints available on the core
and disable the FPB (Flash Patch and Breakpoint Unit)
which will be enabled when a first breakpoint will be set
"""
# setup FPB (breakpoint)
fpcr = self.readMemory(FP_CTRL)
self.nb_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF)
self.nb_lit = (fpcr >> 7) & 0xf
logging.info("%d hardware breakpoints, %d literal comparators", self.nb_code, self.nb_lit)
for i in range(self.nb_code):
self.breakpoints.append(Breakpoint(FP_COMP0 + 4*i))
# disable FPB (will be enabled on first bp set)
self.disableFPB()
for bp in self.breakpoints:
self.writeMemory(bp.comp_register_addr, 0)
def info(self, request):
return self.transport.info(request)
def readIDCode(self):
"""
return the IDCODE of the core
"""
if self.idcode == 0:
self.idcode = self.transport.readDP(DP_REG['IDCODE'])
return self.idcode
def writeMemory(self, addr, value, transfer_size = 32):
"""
write a memory location.
By default the transfer size is a word
"""
self.transport.writeMem(addr, value, transfer_size)
return
def write32(self, addr, value):
"""
Shorthand to write a 32-bit word.
"""
self.writeMemory(addr, value, 32)
def write16(self, addr, value):
"""
Shorthand to write a 16-bit halfword.
"""
self.writeMemory(addr, value, 16)
def write8(self, addr, value):
"""
Shorthand to write a byte.
"""
self.writeMemory(addr, value, 8)
def readMemory(self, addr, transfer_size = 32):
"""
read a memory location. By default, a word will
be read
"""
return self.transport.readMem(addr, transfer_size)
def read32(self, addr):
"""
Shorthand to read a 32-bit word.
"""
return self.readMemory(addr, 32)
def read16(self, addr):
"""
Shorthand to read a 16-bit halfword.
"""
return self.readMemory(addr, 16)
def read8(self, addr):
"""
Shorthand to read a byte.
"""
return self.readMemory(addr, 8)
def readBlockMemoryUnaligned8(self, addr, size):
"""
read a block of unaligned bytes in memory. Returns
an array of byte values
"""
res = []
# try to read 8bits data
if (size > 0) and (addr & 0x01):
mem = self.readMemory(addr, 8)
logging.debug("get 1 byte at %s: 0x%X", hex(addr), mem)
res.append(mem)
size -= 1
addr += 1
# try to read 16bits data
if (size > 1) and (addr & 0x02):
mem = self.readMemory(addr, 16)
logging.debug("get 2 bytes at %s: 0x%X", hex(addr), mem)
res.append(mem & 0xff)
res.append((mem >> 8) & 0xff)
size -= 2
addr += 2
# try to read aligned block of 32bits
if (size >= 4):
logging.debug("read blocks aligned at 0x%X, size: 0x%X", addr, (size/4)*4)
mem = self.readBlockMemoryAligned32(addr, size/4)
res += word2byte(mem)
size -= 4*len(mem)
addr += 4*len(mem)
if (size > 1):
mem = self.readMemory(addr, 16)
logging.debug("get 2 bytes at %s: 0x%X", hex(addr), mem)
res.append(mem & 0xff)
res.append((mem >> 8) & 0xff)
size -= 2
addr += 2
if (size > 0):
mem = self.readMemory(addr, 8)
logging.debug("get 1 byte remaining at %s: 0x%X", hex(addr), mem)
res.append(mem)
size -= 1
addr += 1
return res
def writeBlockMemoryUnaligned8(self, addr, data):
"""
write a block of unaligned bytes in memory.
"""
size = len(data)
idx = 0
#try to write 8 bits data
if (size > 0) and (addr & 0x01):
logging.debug("write 1 byte at 0x%X: 0x%X", addr, data[idx])
self.writeMemory(addr, data[idx], 8)
size -= 1
addr += 1
idx += 1
# try to write 16 bits data
if (size > 1) and (addr & 0x02):
logging.debug("write 2 bytes at 0x%X: 0x%X", addr, data[idx] | (data[idx+1] << 8))
self.writeMemory(addr, data[idx] | (data[idx+1] << 8), 16)
size -= 2
addr += 2
idx += 2
# write aligned block of 32 bits
if (size >= 4):
logging.debug("write blocks aligned at 0x%X, size: 0x%X", addr, (size/4)*4)
data32 = byte2word(data[idx:idx + (size & ~0x03)])
self.writeBlockMemoryAligned32(addr, data32)
addr += size & ~0x03
idx += size & ~0x03
size -= size & ~0x03
# try to write 16 bits data
if (size > 1):
logging.debug("write 2 bytes at 0x%X: 0x%X", addr, data[idx] | (data[idx+1] << 8))
self.writeMemory(addr, data[idx] | (data[idx+1] << 8), 16)
size -= 2
addr += 2
idx += 2
#try to write 8 bits data
if (size > 0):
logging.debug("write 1 byte at 0x%X: 0x%X", addr, data[idx])
self.writeMemory(addr, data[idx], 8)
size -= 1
addr += 1
idx += 1
return
def writeBlockMemoryAligned32(self, addr, data):
"""
write a block of aligned words in memory.
"""
size = len(data)
while size > 0:
            # transfers must not cross an auto-increment page boundary, so
            # clamp each block to the bytes left in the current page
            n = self.auto_increment_page_size - (addr & (self.auto_increment_page_size - 1))
if size*4 < n:
n = (size*4) & 0xfffffffc
self.transport.writeBlock32(addr, data[:n/4])
data = data[n/4:]
size -= n/4
addr += n
return
def readBlockMemoryAligned32(self, addr, size):
"""
read a block of aligned words in memory. Returns
an array of word values
"""
resp = []
while size > 0:
            # clamp to the bytes left in the current auto-increment page
            n = self.auto_increment_page_size - (addr & (self.auto_increment_page_size - 1))
if size*4 < n:
n = (size*4) & 0xfffffffc
resp += self.transport.readBlock32(addr, n/4)
size -= n/4
addr += n
return resp
def halt(self):
"""
halt the core
"""
self.writeMemory(DHCSR, DBGKEY | C_DEBUGEN | C_HALT)
return
def step(self):
"""
perform an instruction level step
"""
if self.getState() != TARGET_HALTED:
logging.debug('cannot step: target not halted')
return
if self.maybeSkipBreakpoint() is None:
self.writeMemory(DHCSR, DBGKEY | C_DEBUGEN | C_STEP)
return
def reset(self):
"""
reset a core. After a call to this function, the core
is running
"""
self.transport.reset()
def resetStopOnReset(self):
"""
perform a reset and stop the core on the reset handler
"""
logging.debug("reset stop on Reset")
# read address of reset handler
reset_handler = self.readMemory(4)
# halt the target
self.halt()
# set a breakpoint to the reset handler and reset the target
self.setBreakpoint(reset_handler)
self.transport.reset()
# wait until the bp is reached
while (self.getState() == TARGET_RUNNING):
pass
# remove the breakpoint
self.removeBreakpoint(reset_handler)
logging.debug("stopped on reset handler: 0x%X", reset_handler)
def setTargetState(self, state):
if state == "PROGRAM":
self.resetStopOnReset()
self.writeMemory(DHCSR, DBGKEY | C_DEBUGEN | C_HALT)
self.writeMemory(DEMCR, VC_CORERESET)
self.writeMemory(NVIC_AIRCR, NVIC_AIRCR_VECTKEY | NVIC_AIRCR_SYSRESETREQ)
while self.getState() == TARGET_RUNNING:
pass
self.writeMemory(DEMCR, 0)
def getState(self):
dhcsr = self.readMemory(DHCSR)
if dhcsr & (C_STEP | C_HALT):
return TARGET_HALTED
return TARGET_RUNNING
def resume(self):
"""
resume the execution
"""
if self.getState() != TARGET_HALTED:
logging.debug('cannot resume: target not halted')
return
self.maybeSkipBreakpoint()
self.writeMemory(DHCSR, DBGKEY | C_DEBUGEN)
return
def maybeSkipBreakpoint(self):
pc = self.readCoreRegister('pc')
bp = self.findBreakpoint(pc)
if bp is not None:
logging.debug('skip/resume breakpoint: pc 0x%X', pc)
self.removeBreakpoint(pc)
self.writeMemory(DHCSR, DBGKEY | C_DEBUGEN | C_STEP)
self.setBreakpoint(pc)
            logging.debug('step over breakpoint: now pc 0x%X', self.readCoreRegister('pc'))
return bp
return None
def findBreakpoint(self, addr):
for bp in self.breakpoints:
if bp.enabled and bp.addr == addr:
return bp
return None
def readCoreRegister(self, reg):
"""
read a core register (r0 .. r16).
        If reg is a string, find the number associated with this register
in the lookup table CORE_REGISTER
"""
if isinstance(reg, str):
try:
reg = CORE_REGISTER[reg.lower()]
except KeyError:
logging.error('cannot find %s core register', reg)
return
if (reg < 0) and (reg >= -4):
specialReg = reg
reg = CORE_REGISTER['cfbp']
else:
specialReg = 0
if reg not in CORE_REGISTER.values():
logging.error("unknown reg: %d", reg)
return
elif ((reg >= 128) or (reg == 33)) and (not self.has_fpu):
logging.error("attempt to read FPU register without FPU")
return
# write id in DCRSR
self.writeMemory(DCRSR, reg)
# Technically, we need to poll S_REGRDY in DHCSR here before reading DCRDR. But
# we're running so slow compared to the target that it's not necessary.
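        # A strict implementation would look like (sketch, not in the
        # original code):
        #   while not (self.readMemory(DHCSR) & S_REGRDY):
        #       pass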
# read DCRDR
val = self.readMemory(DCRDR)
# Special handling for registers that are combined into a single DCRSR number.
if specialReg:
            val = (val >> ((-specialReg - 1) * 8)) & 0xff
# Convert int to float.
elif reg >= 128:
val = int2float(val)
return val
def writeCoreRegister(self, reg, data):
"""
write a core register (r0 .. r16)
        If reg is a string, find the number associated with this register
in the lookup table CORE_REGISTER
"""
if isinstance(reg, str):
try:
reg = CORE_REGISTER[reg.lower()]
except KeyError:
logging.error('cannot find %s core register', reg)
return
if (reg < 0) and (reg >= -4):
specialReg = reg
reg = CORE_REGISTER['cfbp']
# Mask in the new special register value so we don't modify the other register
# values that share the same DCRSR number.
specialRegValue = self.readCoreRegister(reg)
            shift = (-specialReg - 1) * 8
mask = 0xffffffff ^ (0xff << shift)
data = (specialRegValue & mask) | ((data & 0xff) << shift)
else:
specialReg = 0
if reg not in CORE_REGISTER.values():
logging.error("unknown reg: %d", reg)
return
elif ((reg >= 128) or (reg == 33)) and (not self.has_fpu):
logging.error("attempt to read FPU register without FPU")
return
# Convert float to int.
if reg >= 128:
data = float2int(data)
        # write the new value to DCRDR
        self.writeMemory(DCRDR, data)
        # write the register id and the write bit to DCRSR
        self.writeMemory(DCRSR, reg | REGWnR)
def setBreakpoint(self, addr):
"""
set a hardware breakpoint at a specific location in flash
"""
if self.fpb_enabled is False:
self.enableFPB()
if self.availableBreakpoint() == 0:
            logging.error('no more hardware breakpoints available, dropped breakpoint at 0x%X', addr)
return False
for bp in self.breakpoints:
if not bp.enabled:
bp.enabled = True
                # The FP_COMP REPLACE field selects which halfword of the
                # word-aligned address triggers: 01 breaks on the lower
                # halfword, 10 on the upper halfword.
                bp_match = (1 << 30)
                if addr & 0x2:
                    bp_match = (2 << 30)
                self.writeMemory(bp.comp_register_addr, addr & 0x1ffffffc | bp_match | 1)
bp.addr = addr
self.num_breakpoint_used += 1
return True
return False
def availableBreakpoint(self):
return len(self.breakpoints) - self.num_breakpoint_used
def enableFPB(self):
self.writeMemory(FP_CTRL, FP_CTRL_KEY | 1)
self.fpb_enabled = True
logging.debug('fpb has been enabled')
return
def disableFPB(self):
self.writeMemory(FP_CTRL, FP_CTRL_KEY | 0)
self.fpb_enabled = False
logging.debug('fpb has been disabled')
return
def removeBreakpoint(self, addr):
"""
remove a hardware breakpoint at a specific location in flash
"""
for bp in self.breakpoints:
if bp.enabled and bp.addr == addr:
bp.enabled = False
self.writeMemory(bp.comp_register_addr, 0)
bp.addr = addr
self.num_breakpoint_used -= 1
return
return
# GDB functions
def getTargetXML(self):
return self.targetXML, len(self.targetXML)
def getRegisterName(self, compare_val):
for key in CORE_REGISTER:
if (compare_val == CORE_REGISTER[key]):
return key
|
{
"content_hash": "cd9987a1e412d89576a54dc640f2e297",
"timestamp": "",
"source": "github",
"line_count": 782,
"max_line_length": 103,
"avg_line_length": 30.286445012787723,
"alnum_prop": 0.5352558689410573,
"repo_name": "NordicSemiconductor/pyOCD",
"id": "97077c5df9a07fab7f914c155423f9b378b0fab8",
"size": "23684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyOCD/target/cortex_m.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "174778"
}
],
"symlink_target": ""
}
|
from subprocess import call
import os
from django.core.management.base import BaseCommand, CommandError
from ...conf import settings
from ... import utils
class Command(BaseCommand):
help = 'Pushes english locale source'
requires_system_checks = False
def handle(self, *args, **options):
# Requires `pip install transifex-client``
# also: ``$ tx init`` to create credentials
root = os.path.split(settings.ST_BASE_DIR)[0]
tx_dir = os.path.join(root, '.tx')
if not os.path.isdir(tx_dir):
raise CommandError('Can\'t find the .tx folder in %s' % (root, ))
with utils.pushd(root):
# -t will update the translation,
# only if it was updated locally,
# so use when fixing something
call(["tx", "push", "--source", "--skip", "--language", "en"])
self.stdout.write('ok')
|
{
"content_hash": "c0a8d4afa157695edbc924132e728a55",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 77,
"avg_line_length": 29.966666666666665,
"alnum_prop": 0.60734149054505,
"repo_name": "nitely/Spirit",
"id": "e84a6ef2e1671d12a039a5dedf3342d6eacfbf64",
"size": "924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spirit/core/management/commands/spirittxpush.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "207949"
},
{
"name": "CoffeeScript",
"bytes": "105109"
},
{
"name": "HTML",
"bytes": "171485"
},
{
"name": "JavaScript",
"bytes": "2759"
},
{
"name": "Makefile",
"bytes": "709"
},
{
"name": "Python",
"bytes": "854233"
},
{
"name": "SCSS",
"bytes": "94771"
}
],
"symlink_target": ""
}
|
"""Tests for Incremental PCA."""
import numpy as np
import pytest
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
from scipy import sparse
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
assert X_transformed.shape == (X.shape[0], 2)
np.testing.assert_allclose(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), rtol=1e-3)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
np.testing.assert_allclose(np.dot(cov, precision),
np.eye(X.shape[1]), atol=1e-13)
@pytest.mark.parametrize(
"matrix_class",
[sparse.csc_matrix, sparse.csr_matrix, sparse.lil_matrix])
def test_incremental_pca_sparse(matrix_class):
# Incremental PCA on sparse arrays.
X = iris.data
pca = PCA(n_components=2)
pca.fit_transform(X)
X_sparse = matrix_class(X)
batch_size = X_sparse.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
X_transformed = ipca.fit_transform(X_sparse)
assert X_transformed.shape == (X_sparse.shape[0], 2)
np.testing.assert_allclose(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), rtol=1e-3)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X_sparse)
cov = ipca.get_covariance()
precision = ipca.get_precision()
np.testing.assert_allclose(np.dot(cov, precision),
np.eye(X_sparse.shape[1]), atol=1e-13)
with pytest.raises(
TypeError,
match="IncrementalPCA.partial_fit does not support "
"sparse input. Either convert data to dense "
"or use IncrementalPCA.fit to do so in batches."):
ipca.partial_fit(X_sparse)
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = np.array([[0, 1, 0], [1, 0, 0]])
n_samples, n_features = X.shape
for n_components in [-1, 0, .99, 4]:
with pytest.raises(ValueError, match="n_components={} invalid"
" for n_features={}, need more rows than"
" columns for IncrementalPCA"
" processing".format(n_components,
n_features)):
IncrementalPCA(n_components, batch_size=10).fit(X)
# Tests that n_components is also <= n_samples.
n_components = 3
with pytest.raises(ValueError, match="n_components={} must be"
" less or equal to the batch number of"
" samples {}".format(n_components, n_samples)):
IncrementalPCA(n_components=n_components).partial_fit(X)
def test_n_components_none():
# Ensures that n_components == None is handled correctly
rng = np.random.RandomState(1999)
for n_samples, n_features in [(50, 10), (10, 50)]:
X = rng.rand(n_samples, n_features)
ipca = IncrementalPCA(n_components=None)
# First partial_fit call, ipca.n_components_ is inferred from
# min(X.shape)
ipca.partial_fit(X)
assert ipca.n_components_ == min(X.shape)
# Second partial_fit call, ipca.n_components_ is inferred from
# ipca.components_ computed from the first partial_fit call
ipca.partial_fit(X)
assert ipca.n_components_ == ipca.components_.shape[0]
def test_incremental_pca_set_params():
    # Test that partial_fit after changing n_components raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
with pytest.raises(ValueError):
ipca.partial_fit(X2)
# Increasing number of components
ipca.set_params(n_components=15)
with pytest.raises(ValueError):
ipca.partial_fit(X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of features will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
with pytest.raises(ValueError):
ipca.partial_fit(X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_batch_rank():
    # Test that the sample size in each batch is always larger than or equal to n_components.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 90, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=20, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for components_i, components_j in zip(all_components[:-1],
all_components[1:]):
assert_allclose_dense_sparse(components_i, components_j)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_singular_values():
# Check that the IncrementalPCA output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 1000
n_features = 100
X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
effective_rank=10, random_state=rng)
pca = PCA(n_components=10, svd_solver='full', random_state=rng).fit(X)
ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X)
assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2)
# Compare to the Frobenius norm
X_pca = pca.transform(X)
X_ipca = ipca.transform(X)
assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
np.linalg.norm(X_pca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(ipca.singular_values_**2.0),
np.linalg.norm(X_ipca, "fro")**2.0, 2)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(pca.singular_values_,
np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
assert_array_almost_equal(ipca.singular_values_,
np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
effective_rank=3, random_state=rng)
pca = PCA(n_components=3, svd_solver='full', random_state=rng)
ipca = IncrementalPCA(n_components=3, batch_size=100)
X_pca = pca.fit_transform(X)
X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
X_pca[:, 0] *= 3.142
X_pca[:, 1] *= 2.718
X_hat = np.dot(X_pca, pca.components_)
pca.fit(X_hat)
ipca.fit(X_hat)
assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
def test_incremental_pca_partial_fit_float_division():
# Test to ensure float division is used in all versions of Python
# (non-regression test for issue #9489)
rng = np.random.RandomState(0)
A = rng.randn(5, 3) + 2
B = rng.randn(7, 3) + 5
pca = IncrementalPCA(n_components=2)
pca.partial_fit(A)
# Set n_samples_seen_ to be a floating point number instead of an int
pca.n_samples_seen_ = float(pca.n_samples_seen_)
pca.partial_fit(B)
singular_vals_float_samples_seen = pca.singular_values_
pca2 = IncrementalPCA(n_components=2)
pca2.partial_fit(A)
pca2.partial_fit(B)
singular_vals_int_samples_seen = pca2.singular_values_
np.testing.assert_allclose(singular_vals_float_samples_seen,
singular_vals_int_samples_seen)
def test_incremental_pca_fit_overflow_error():
# Test for overflow error on Windows OS
# (non-regression test for issue #17693)
rng = np.random.RandomState(0)
A = rng.rand(500000, 2)
ipca = IncrementalPCA(n_components=2, batch_size=10000)
ipca.fit(A)
pca = PCA(n_components=2)
pca.fit(A)
np.testing.assert_allclose(ipca.singular_values_, pca.singular_values_)
|
{
"content_hash": "ff1f4bb6271aa3692fdf768342cbe114",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 79,
"avg_line_length": 37.069825436408976,
"alnum_prop": 0.6316851664984864,
"repo_name": "ryfeus/lambda-packs",
"id": "d198b67c720c1df8ee5b6cbff5ec9b27e0a1d873",
"size": "14865",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "Sklearn_arm/source/sklearn/decomposition/tests/test_incremental_pca.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
from common import *
from ast import *
|
{
"content_hash": "8f82a355fc4c93a0902d2074dc500474",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 20,
"avg_line_length": 10.25,
"alnum_prop": 0.7073170731707317,
"repo_name": "ArcherSys/ArcherSys",
"id": "74b83d08e56aa39afa26b9d6a37c55d8a62069d6",
"size": "41",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Lib/site-packages/construct/text/__init__.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('linked_domain', '0013_auto_20201005_2215'),
]
operations = [
migrations.AlterField(
model_name='domainlinkhistory',
name='model',
field=models.CharField(choices=[('app', 'Application'),
('fixture', 'Lookup Table'),
('report', 'Report'),
('keyword', 'Keyword'),
('custom_user_data', 'Custom User Data Fields'),
('custom_product_data', 'Custom Product Data Fields'),
('custom_location_data', 'Custom Location Data Fields'),
('roles', 'User Roles'),
('toggles', 'Feature Flags and Previews'),
('case_search_data', 'Case Search Settings'),
('data_dictionary', 'Data Dictionary'),
('dialer_settings', 'Dialer Settings'),
('otp_settings', 'OTP Pass-through Settings'),
('hmac_callout_settings', 'Signed Callout')],
max_length=128),
),
]
|
{
"content_hash": "1f9e47eb6f41dbe16a97cb501464d5ce",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 100,
"avg_line_length": 50.56666666666667,
"alnum_prop": 0.3862887277521424,
"repo_name": "dimagi/commcare-hq",
"id": "e721a5c178018fe689634732f5dc210eccde698f",
"size": "1567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/linked_domain/migrations/0014_auto_20210503_1758.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import keypairs
from nova.api.openstack import wsgi
from nova import db
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.objects import test_keypair
QUOTAS = quota.QUOTAS
keypair_data = {
'public_key': 'FAKE_KEY',
'fingerprint': 'FAKE_FINGERPRINT',
}
def fake_keypair(name):
return dict(test_keypair.fake_keypair,
name=name, **keypair_data)
def db_key_pair_get_all_by_user(self, user_id):
return [fake_keypair('FAKE')]
def db_key_pair_create(self, keypair):
return fake_keypair(name=keypair['name'])
def db_key_pair_destroy(context, user_id, name):
if not (user_id and name):
raise Exception()
def db_key_pair_create_duplicate(context, keypair):
raise exception.KeyPairExists(key_name=keypair.get('name', ''))
class KeypairsTest(test.TestCase):
def setUp(self):
super(KeypairsTest, self).setUp()
self.Controller = keypairs.Controller()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Keypairs'])
self.app = fakes.wsgi_app(init_only=('os-keypairs',))
def test_keypair_list(self):
req = webob.Request.blank('/v2/fake/os-keypairs')
res = req.get_response(self.app)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
response = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE')}]}
self.assertEqual(res_dict, response)
def test_keypair_create(self):
body = {'keypair': {'name': 'create_test'}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertTrue(len(res_dict['keypair']['private_key']) > 0)
def _test_keypair_create_bad_request_case(self, body):
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_keypair_create_with_empty_name(self):
body = {'keypair': {'name': ''}}
self._test_keypair_create_bad_request_case(body)
def test_keypair_create_with_name_too_long(self):
body = {
'keypair': {
'name': 'a' * 256
}
}
self._test_keypair_create_bad_request_case(body)
def test_keypair_create_with_non_alphanumeric_name(self):
body = {
'keypair': {
'name': 'test/keypair'
}
}
self._test_keypair_create_bad_request_case(body)
def test_keypair_import_bad_key(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-what negative',
},
}
self._test_keypair_create_bad_request_case(body)
def test_keypair_create_with_invalid_keypair_body(self):
body = {'alpha': {'name': 'create_test'}}
self._test_keypair_create_bad_request_case(body)
def test_keypair_import(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 200)
        # FIXME(ja): should we check that public_key was sent to create?
res_dict = jsonutils.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertNotIn('private_key', res_dict['keypair'])
def test_keypair_import_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 413)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Quota exceeded, too many key pairs.",
res_dict['overLimit']['message'])
def test_keypair_create_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 413)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Quota exceeded, too many key pairs.",
res_dict['overLimit']['message'])
def test_keypair_create_duplicate(self):
self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
body = {'keypair': {'name': 'create_duplicate'}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 409)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Key pair 'create_duplicate' already exists.",
res_dict['conflictingRequest']['message'])
def test_keypair_delete(self):
req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
def test_keypair_get_keypair_not_found(self):
req = webob.Request.blank('/v2/fake/os-keypairs/DOESNOTEXIST')
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_keypair_delete_not_found(self):
def db_key_pair_get_not_found(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stubs.Set(db, "key_pair_get",
db_key_pair_get_not_found)
req = webob.Request.blank('/v2/fake/os-keypairs/WHAT')
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_keypair_show(self):
def _db_key_pair_get(context, user_id, name):
return dict(test_keypair.fake_keypair,
name='foo', public_key='XXX', fingerprint='YYY')
self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
res_dict = jsonutils.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual('foo', res_dict['keypair']['name'])
self.assertEqual('XXX', res_dict['keypair']['public_key'])
self.assertEqual('YYY', res_dict['keypair']['fingerprint'])
def test_keypair_show_not_found(self):
def _db_key_pair_get(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_show_server(self):
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get())
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get())
req = webob.Request.blank('/v2/fake/servers/1')
req.headers['Content-Type'] = 'application/json'
response = req.get_response(fakes.wsgi_app(init_only=('servers',)))
self.assertEqual(response.status_int, 200)
res_dict = jsonutils.loads(response.body)
self.assertIn('key_name', res_dict['server'])
self.assertEqual(res_dict['server']['key_name'], '')
def test_detail_servers(self):
self.stubs.Set(db, 'instance_get_all_by_filters',
fakes.fake_instance_get_all_by_filters())
req = fakes.HTTPRequest.blank('/v2/fake/servers/detail')
res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
server_dicts = jsonutils.loads(res.body)['servers']
self.assertEqual(len(server_dicts), 5)
for server_dict in server_dicts:
self.assertIn('key_name', server_dict)
self.assertEqual(server_dict['key_name'], '')
class KeypairPolicyTest(test.TestCase):
def setUp(self):
super(KeypairPolicyTest, self).setUp()
self.KeyPairController = keypairs.KeypairController()
def _db_key_pair_get(context, user_id, name):
return dict(test_keypair.fake_keypair,
name='foo', public_key='XXX', fingerprint='YYY')
self.stubs.Set(db, "key_pair_get",
_db_key_pair_get)
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
def test_keypair_list_fail_policy(self):
rules = {'compute_extension:keypairs:index':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs')
self.assertRaises(exception.Forbidden,
self.KeyPairController.index,
req)
def test_keypair_list_pass_policy(self):
rules = {'compute_extension:keypairs:index':
common_policy.parse_rule('')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs')
res = self.KeyPairController.index(req)
self.assertIn('keypairs', res)
def test_keypair_show_fail_policy(self):
rules = {'compute_extension:keypairs:show':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs/FAKE')
self.assertRaises(exception.Forbidden,
self.KeyPairController.show,
req, 'FAKE')
def test_keypair_show_pass_policy(self):
rules = {'compute_extension:keypairs:show':
common_policy.parse_rule('')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs/FAKE')
res = self.KeyPairController.show(req, 'FAKE')
self.assertIn('keypair', res)
def test_keypair_create_fail_policy(self):
rules = {'compute_extension:keypairs:create':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs')
req.method = 'POST'
self.assertRaises(exception.Forbidden,
self.KeyPairController.create,
req, {})
def test_keypair_create_pass_policy(self):
body = {'keypair': {'name': 'create_test'}}
rules = {'compute_extension:keypairs:create':
common_policy.parse_rule('')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs')
req.method = 'POST'
res = self.KeyPairController.create(req, body)
self.assertIn('keypair', res)
def test_keypair_delete_fail_policy(self):
rules = {'compute_extension:keypairs:delete':
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'DELETE'
self.assertRaises(exception.Forbidden,
self.KeyPairController.delete,
req, 'FAKE')
def test_keypair_delete_pass_policy(self):
rules = {'compute_extension:keypairs:delete':
common_policy.parse_rule('')}
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'DELETE'
res = self.KeyPairController.delete(req, 'FAKE')
self.assertEqual(res.status_int, 202)
class KeypairsXMLSerializerTest(test.TestCase):
def setUp(self):
super(KeypairsXMLSerializerTest, self).setUp()
self.deserializer = wsgi.XMLDeserializer()
def test_default_serializer(self):
exemplar = dict(keypair=dict(
public_key='fake_public_key',
private_key='fake_private_key',
fingerprint='fake_fingerprint',
user_id='fake_user_id',
name='fake_key_name'))
serializer = keypairs.KeypairTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('keypair', tree.tag)
for child in tree:
self.assertIn(child.tag, exemplar['keypair'])
self.assertEqual(child.text, exemplar['keypair'][child.tag])
def test_index_serializer(self):
exemplar = dict(keypairs=[
dict(keypair=dict(
name='key1_name',
public_key='key1_key',
fingerprint='key1_fingerprint')),
dict(keypair=dict(
name='key2_name',
public_key='key2_key',
fingerprint='key2_fingerprint'))])
serializer = keypairs.KeypairsTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('keypairs', tree.tag)
self.assertEqual(len(exemplar['keypairs']), len(tree))
for idx, keypair in enumerate(tree):
self.assertEqual('keypair', keypair.tag)
kp_data = exemplar['keypairs'][idx]['keypair']
for child in keypair:
self.assertIn(child.tag, kp_data)
self.assertEqual(child.text, kp_data[child.tag])
def test_deserializer(self):
exemplar = dict(keypair=dict(
name='key_name',
public_key='public_key'))
intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<keypair><name>key_name</name>'
'<public_key>public_key</public_key></keypair>')
result = self.deserializer.deserialize(intext)['body']
self.assertEqual(result, exemplar)
|
{
"content_hash": "6e479b84cbb0eb3d8807739eae2aee86",
"timestamp": "",
"source": "github",
"line_count": 455,
"max_line_length": 79,
"avg_line_length": 38.738461538461536,
"alnum_prop": 0.5841938046068308,
"repo_name": "viggates/nova",
"id": "6ca588588ff22454aa16604582787d3397a26915",
"size": "18254",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/api/openstack/compute/contrib/test_keypairs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14822788"
},
{
"name": "Shell",
"bytes": "18352"
}
],
"symlink_target": ""
}
|
import functools
import os
from django.core.exceptions import ImproperlyConfigured
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from django.template import Context, Template, TemplateDoesNotExist
from django.template.loader import get_template
from mailviews.messages import (TemplatedEmailMessageView,
TemplatedHTMLEmailMessageView)
from mailviews.previews import URL_NAMESPACE
from mailviews.tests.emails.views import (BasicEmailMessageView,
BasicHTMLEmailMessageView)
from mailviews.tests.emails.previews import (BasicPreview,
BasicHTMLPreview,
CustomizablePreview)
from mailviews.utils import split_docstring
try:
from django.test.utils import override_settings
except ImportError:
from mailviews.tests.utils import override_settings # noqa
using_test_templates = override_settings(
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(__file__), 'templates'),
),
TEMPLATE_LOADERS=(
'django.template.loaders.filesystem.Loader',
)
)
class EmailMessageViewTestCase(TestCase):
def run(self, *args, **kwargs):
with using_test_templates:
return super(EmailMessageViewTestCase, self).run(*args, **kwargs)
def assertTemplateExists(self, name):
try:
get_template(name)
except TemplateDoesNotExist:
raise AssertionError('Template does not exist: %s' % name)
def assertTemplateDoesNotExist(self, name):
try:
self.assertTemplateExists(name)
except AssertionError:
return
raise AssertionError('Template exists: %s' % name)
def assertOutboxLengthEquals(self, length):
self.assertEqual(len(mail.outbox), length)
class TemplatedEmailMessageViewTestCase(EmailMessageViewTestCase):
message_class = TemplatedEmailMessageView
def setUp(self):
self.message = self.message_class()
self.template = 'Hello, world!'
self.subject = 'subject'
self.subject_template = Template('{{ subject }}')
self.body = 'body'
self.body_template = Template('{{ body }}')
self.context_dict = {
'subject': self.subject,
'body': self.body,
}
self.context = Context(self.context_dict)
self.render_subject = functools.partial(self.message.render_subject,
context=self.context)
self.render_body = functools.partial(self.message.render_body,
context=self.context)
def add_templates_to_message(self):
"""
Adds templates to the fixture message, ensuring it can be rendered.
"""
self.message.subject_template = self.subject_template
self.message.body_template = self.body_template
def test_subject_template_unconfigured(self):
self.assertRaises(ImproperlyConfigured, self.render_subject)
def test_subject_invalid_template_name(self):
template = 'invalid.txt'
self.assertTemplateDoesNotExist(template)
self.message.subject_template_name = template
self.assertRaises(TemplateDoesNotExist, self.render_subject)
def test_subject_template_name(self):
template = 'subject.txt'
self.assertTemplateExists(template)
self.message.subject_template_name = template
self.assertEqual(self.render_subject(), self.subject)
def test_subject_template(self):
self.message.subject_template = self.subject_template
self.assertEqual(self.render_subject(), self.subject)
def test_body_template_unconfigured(self):
self.assertRaises(ImproperlyConfigured, self.render_body)
def test_body_invalid_template_name(self):
template = 'invalid.txt'
self.assertTemplateDoesNotExist(template)
self.message.body_template_name = template
self.assertRaises(TemplateDoesNotExist, self.render_body)
def test_body_template_name(self):
template = 'body.txt'
self.assertTemplateExists(template)
self.message.body_template_name = template
self.assertEqual(self.render_body(), self.body + '\n')
def test_body_template(self):
self.message.body_template = self.body_template
self.assertEqual(self.render_body(), self.body)
def test_render_to_message(self):
self.add_templates_to_message()
message = self.message.render_to_message(self.context_dict)
self.assertEqual(message.subject, self.subject)
self.assertEqual(message.body, self.body)
def test_send(self):
self.add_templates_to_message()
self.message.send(self.context_dict, to=('ted@disqus.com',))
self.assertOutboxLengthEquals(1)
def test_custom_headers(self):
self.add_templates_to_message()
address = 'ted@disqus.com'
self.message.headers['Reply-To'] = address
self.assertEqual(self.message.headers['Reply-To'], address)
rendered = self.message.render_to_message()
self.assertEqual(rendered.extra_headers['Reply-To'], address)
rendered = self.message.render_to_message(headers={
'References': 'foo',
})
self.assertEqual(rendered.extra_headers['Reply-To'], address)
self.assertEqual(rendered.extra_headers['References'], 'foo')
class TemplatedHTMLEmailMessageViewTestCase(TemplatedEmailMessageViewTestCase):
message_class = TemplatedHTMLEmailMessageView
def setUp(self):
super(TemplatedHTMLEmailMessageViewTestCase, self).setUp()
self.html_body = 'html body'
self.html_body_template = Template('{{ html }}')
self.context_dict['html'] = self.html_body
self.context['html'] = self.html_body
self.render_html_body = functools.partial(
self.message.render_html_body,
context=self.context)
def add_templates_to_message(self):
"""
Adds templates to the fixture message, ensuring it can be rendered.
"""
super(TemplatedHTMLEmailMessageViewTestCase, self)\
.add_templates_to_message()
self.message.html_body_template = self.html_body_template
def test_html_body_template_unconfigured(self):
self.assertRaises(ImproperlyConfigured, self.render_html_body)
def test_html_body_invalid_template_name(self):
template = 'invalid.txt'
self.assertTemplateDoesNotExist(template)
self.message.html_body_template_name = template
self.assertRaises(TemplateDoesNotExist, self.render_html_body)
def test_html_body_template_name(self):
template = 'body.html'
self.assertTemplateExists(template)
self.message.html_body_template_name = template
self.assertEqual(self.render_html_body(), self.html_body + '\n')
def test_html_body_template(self):
self.message.html_body_template = self.html_body_template
self.assertEqual(self.render_html_body(), self.html_body)
def test_render_to_message(self):
self.add_templates_to_message()
message = self.message.render_to_message(self.context_dict)
self.assertEqual(message.subject, self.subject)
self.assertEqual(message.body, self.body)
self.assertEqual(message.alternatives, [(self.html_body, 'text/html')])
def test_send(self):
self.add_templates_to_message()
self.message.send(self.context_dict, to=('ted@disqus.com',))
self.assertOutboxLengthEquals(1)
class SplitDocstringTestCase(TestCase):
def test_split_docstring(self):
header, body = split_docstring(split_docstring)
self.assertEqual(header, "Splits the docstring of the given value into it's summary and body.")
def test_split_docstring_no_body(self):
def fn():
"""Does a thing."""
header, body = split_docstring(fn)
self.assertEqual(header, "Does a thing.")
class PreviewSiteTestCase(TestCase):
def setUp(self):
super(PreviewSiteTestCase, self).setUp()
self.client = Client()
def test_basic_preview(self):
url = reverse('%s:detail' % URL_NAMESPACE, kwargs={
'module': BasicEmailMessageView.__module__,
'preview': BasicPreview.__name__
})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIn('#body-plain', response.content)
self.assertIn('#raw', response.content)
def test_basic_html_preview(self):
url = reverse('%s:detail' % URL_NAMESPACE, kwargs={
'module': BasicHTMLEmailMessageView.__module__,
'preview': BasicHTMLPreview.__name__
})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIn('#html', response.content)
self.assertIn('#body-plain', response.content)
self.assertIn('#raw', response.content)
def test_customizable_preview(self):
url = reverse('%s:detail' % URL_NAMESPACE, kwargs={
'module': BasicEmailMessageView.__module__,
'preview': CustomizablePreview.__name__
})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIn('<form', response.content)
self.assertIn('#body-plain', response.content)
self.assertIn('#raw', response.content)
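

# Editor's sketch (not part of the original suite): the end-to-end usage
# pattern the tests above exercise, assuming the bundled 'subject.txt'
# and 'body.txt' templates are on the template path.
def _example_send(address):
    message = TemplatedEmailMessageView()
    message.subject_template_name = 'subject.txt'
    message.body_template_name = 'body.txt'
    # The context is rendered into both templates and the result goes
    # through Django's normal mail backend (mail.outbox during tests).
    message.send({'subject': 'greetings', 'body': 'hello'}, to=(address,))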
|
{
"content_hash": "688ffd88b44ccb4719aef34f38d0add3",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 103,
"avg_line_length": 35.492592592592594,
"alnum_prop": 0.6599186058645519,
"repo_name": "paxnovem/django-mailviews",
"id": "2ee2ae6a704eb49226460f5ed05944cef2c117f5",
"size": "9583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mailviews/tests/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "707"
},
{
"name": "HTML",
"bytes": "4806"
},
{
"name": "Makefile",
"bytes": "1033"
},
{
"name": "Python",
"bytes": "34195"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
long_description = open('README.rst').read()
setup(name='ndar-backend',
version='0.1.0',
description='NDAR back-end tools',
author='Christian Haselgrove',
author_email='christian.haselgrove@umassmed.edu',
url='https://github.com/chaselgrove/ndar/ndar_backend',
scripts=['store_first_all_results',
'store_recon_all_results',
'store_structural_qa'],
classifiers=['Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering'],
license='BSD license',
long_description=long_description
)
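
# Editor's note (sketch): with a plain distutils setup module like this,
# a source distribution is typically built and installed with:
#
#     python setup.py sdist
#     pip install dist/ndar-backend-0.1.0.tar.gz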
# eof
|
{
"content_hash": "3282ac7dc68a015b7e705cf6fa140f90",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 62,
"avg_line_length": 37.36,
"alnum_prop": 0.5674518201284796,
"repo_name": "NDAR/NITRC-Pipeline-for-NDAR",
"id": "ce131b83580192ab6e3c8166ba483de85535873b",
"size": "953",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ndar_backend/setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "167799"
},
{
"name": "Shell",
"bytes": "790"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^get_labels$', views.GetLabels.as_view()),
url(r'^cancel_job/(?P<job_id>([0-9]+))$', views.CancelJob.as_view()),
url(r'^get_status$', views.GetStatus.as_view()),
url(r'^get_printers$', views.GetPrinters.as_view()),
url(r'^print_label$', views.PrintLabels.as_view()),
]
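
# Editor's sketch (hypothetical; the real handlers live in views.py): a
# minimal class-based view of the kind these routes dispatch to.
from django.http import JsonResponse
from django.views.generic import View


class ExampleGetStatus(View):
    def get(self, request):
        # A real implementation would report printer/job queue state here.
        return JsonResponse({'status': 'ok'})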
|
{
"content_hash": "ed4422f50b437844ea5cb2cf81060b44",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 73,
"avg_line_length": 36.5,
"alnum_prop": 0.6383561643835617,
"repo_name": "ojarva/home-info-display",
"id": "f3a2e6cb51ce518d10b2704fd6a07e150ee7b6cd",
"size": "365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homedisplay/control_printer/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "22171"
},
{
"name": "CoffeeScript",
"bytes": "115283"
},
{
"name": "HTML",
"bytes": "51598"
},
{
"name": "JavaScript",
"bytes": "9902"
},
{
"name": "Python",
"bytes": "310675"
},
{
"name": "Shell",
"bytes": "1617"
}
],
"symlink_target": ""
}
|
import pytest
import os
import funcy
import microcache
import requests_mock
import pypandoc
from hatchery import project
from hatchery import snippets
from hatchery import helpers
try:
from unittest import mock
except ImportError:
import mock
PROJECT_NAME = 'myproject'
PACKAGE_NAME = 'mypackage'
INDEX_URL = 'https://mocked.pypi.python.org/pypi'
def setup_module(module):
microcache.disable()
def teardown_module(module):
microcache.enable()
def _make_package(package_name, empty_module_files=[]):
package_path = package_name.replace('.', '/')
os.makedirs(package_path)
open(os.path.join(package_path, '__init__.py'), 'w').close()
for module_file in empty_module_files:
open(os.path.join(package_path, module_file), 'w').close()
def test_get_package_name(tmpdir):
with tmpdir.as_cwd():
with pytest.raises(project.ProjectError):
project.get_package_name()
_make_package('tests')
with pytest.raises(project.ProjectError):
project.get_package_name()
_make_package('package_name')
assert project.get_package_name() == 'package_name'
_make_package('package_name.subpackage')
assert project.get_package_name() == 'package_name'
_make_package('tests.subtests')
assert project.get_package_name() == 'package_name'
_make_package('another_root_package')
with pytest.raises(project.ProjectError):
project.get_package_name()
def test_project_has_setup_py(tmpdir):
with tmpdir.as_cwd():
assert project.project_has_setup_py() is False
open('setup.py', 'w').close()
assert project.project_has_setup_py() is True
def test_setup_py_uses__version_py(tmpdir):
with tmpdir.as_cwd():
open('setup.py', 'w').close()
assert project.setup_py_uses__version_py() is False
snippet_content = snippets.get_snippet_content(
snippet_name='setup.py',
package_name=PACKAGE_NAME
)
with open('setup.py', 'a') as setup_py:
setup_py.write(snippet_content)
assert project.setup_py_uses__version_py() is True
def test_setup_py_uses___version__(tmpdir):
with tmpdir.as_cwd():
open('setup.py', 'w').close()
assert project.setup_py_uses___version__() is False
with open('setup.py', 'a') as setup_py:
setup_py.write('setup(version=__version__)')
assert project.setup_py_uses___version__() is True
def test_package_has_version_file(tmpdir):
with tmpdir.as_cwd():
assert project.package_has_version_file(PACKAGE_NAME) is False
version_file = helpers.package_file_path('_version.py', PACKAGE_NAME)
_make_package(PACKAGE_NAME, empty_module_files=[os.path.basename(version_file)])
assert project.package_has_version_file(PACKAGE_NAME) is True
def test_version_file_has___version__(tmpdir):
with tmpdir.as_cwd():
version_file = helpers.package_file_path('_version.py', PACKAGE_NAME)
_make_package(PACKAGE_NAME, empty_module_files=[os.path.basename(version_file)])
assert project.version_file_has___version__(PACKAGE_NAME) is False
snippet_content = snippets.get_snippet_content('_version.py')
with open(version_file, 'a') as _version_py:
_version_py.write(snippet_content)
assert project.version_file_has___version__(PACKAGE_NAME) is True
def test_get_project_name(tmpdir):
with tmpdir.as_cwd():
with pytest.raises(IOError):
project.get_project_name()
with open('setup.py', 'w') as setup_py:
setup_py.write('setup(name="someproject")')
assert project.get_project_name() == 'someproject'
def test_get_version(tmpdir):
with tmpdir.as_cwd():
with pytest.raises(IOError):
project.get_version(PACKAGE_NAME)
version_file = helpers.package_file_path('_version.py', PACKAGE_NAME)
_make_package(PACKAGE_NAME, empty_module_files=[os.path.basename(version_file)])
with pytest.raises(project.ProjectError):
project.get_version(PACKAGE_NAME)
with open(version_file, 'w') as _version_py:
_version_py.write("__version__='someversion'")
with microcache.temporarily_enabled():
assert project.get_version(PACKAGE_NAME) == 'someversion'
snippet_content = snippets.get_snippet_content('_version.py')
with open(version_file, 'w') as _version_py:
_version_py.write(snippet_content)
assert project.get_version(PACKAGE_NAME) == 'someversion'
assert project.get_version(PACKAGE_NAME, ignore_cache=True) == 'managed by hatchery'
def test_set_version(tmpdir):
with tmpdir.as_cwd():
version_file = helpers.package_file_path('_version.py', PACKAGE_NAME)
_make_package(PACKAGE_NAME, empty_module_files=[os.path.basename(version_file)])
snippet_content = snippets.get_snippet_content('_version.py')
with open(version_file, 'w') as _version_py:
_version_py.write(snippet_content)
project.set_version(PACKAGE_NAME, '1.2.3')
version_file_content = helpers.get_file_content(version_file)
found = funcy.re_find(project.VERSION_SET_REGEX, version_file_content)
assert found['version'] == '1.2.3'
def test__get_uploaded_versions_warehouse():
api_url = '/'.join((INDEX_URL, PROJECT_NAME, 'json'))
with requests_mock.mock() as m:
m.get(api_url, status_code=404)
assert project._get_uploaded_versions_warehouse(PROJECT_NAME, INDEX_URL) is None
m.get(api_url, text='{"releases": {"0.1": [], "0.2": []}}')
assert set(project._get_uploaded_versions_warehouse(PROJECT_NAME, INDEX_URL)) == \
set(['0.1', '0.2'])
def test__get_upload_versions_pypicloud():
api_url = '/'.join((INDEX_URL.replace('/pypi', '/api/package'), PROJECT_NAME))
with requests_mock.mock() as m:
m.get(api_url, status_code=404)
assert project._get_uploaded_versions_pypicloud(PROJECT_NAME, INDEX_URL) is None
m.get(api_url, text='{"packages": [{"version": "0.1"}, {"version": "0.2"}]}')
assert set(project._get_uploaded_versions_pypicloud(PROJECT_NAME, INDEX_URL)) == \
set(['0.1', '0.2'])
def test__get_uploaded_versions(monkeypatch):
monkeypatch.setattr(project, '_get_uploaded_versions_warehouse', lambda a, b, c: None)
monkeypatch.setattr(project, '_get_uploaded_versions_pypicloud', lambda a, b, c: None)
assert project._get_uploaded_versions(PROJECT_NAME, INDEX_URL) == []
monkeypatch.setattr(project, '_get_uploaded_versions_warehouse', lambda a, b, c: ['0.1', '0.2'])
assert set(project._get_uploaded_versions(PROJECT_NAME, INDEX_URL)) == set(['0.1', '0.2'])
def test_version_already_uploaded(monkeypatch):
monkeypatch.setattr(project, '_get_uploaded_versions_warehouse', lambda a, b, c: None)
monkeypatch.setattr(project, '_get_uploaded_versions_pypicloud', lambda a, b, c: None)
assert project.version_already_uploaded(PROJECT_NAME, '0.1', INDEX_URL) is False
monkeypatch.setattr(project, '_get_uploaded_versions_warehouse', lambda a, b, c: ['0.1', '0.2'])
assert project.version_already_uploaded(PROJECT_NAME, '0.1', INDEX_URL) is True
assert project.version_already_uploaded(PROJECT_NAME, '0.3', INDEX_URL) is False
def test_get_latest_uploaded_version(monkeypatch):
monkeypatch.setattr(project, '_get_uploaded_versions_warehouse', lambda a, b, c: None)
monkeypatch.setattr(project, '_get_uploaded_versions_pypicloud', lambda a, b, c: None)
assert project.get_latest_uploaded_version(PROJECT_NAME, INDEX_URL) is None
monkeypatch.setattr(project, '_get_uploaded_versions_warehouse', lambda a, b, c: ['0.1', '0.2'])
assert project.get_latest_uploaded_version(PROJECT_NAME, INDEX_URL) == '0.2'
def test_version_is_latest(monkeypatch):
monkeypatch.setattr(project, '_get_uploaded_versions_warehouse', lambda a, b, c: None)
monkeypatch.setattr(project, '_get_uploaded_versions_pypicloud', lambda a, b, c: None)
assert project.version_is_latest(PROJECT_NAME, '0.1', INDEX_URL) is True
monkeypatch.setattr(project, '_get_uploaded_versions_warehouse', lambda a, b, c: ['0.1', '0.2'])
assert project.version_is_latest(PROJECT_NAME, '0.1.5', INDEX_URL) is False
assert project.version_is_latest(PROJECT_NAME, '0.2', INDEX_URL) is False
assert project.version_is_latest(PROJECT_NAME, '0.3', INDEX_URL) is True
def test_project_has_readme_md(tmpdir):
with tmpdir.as_cwd():
assert project.project_has_readme_md() is False
open('readme.md', 'w').close()
assert project.project_has_readme_md() is True
os.remove('readme.md')
open('README.md', 'w').close()
assert project.project_has_readme_md() is True
def _pandoc_installed():
try:
pypandoc.get_pandoc_path()
except OSError:
return False
return True
@pytest.mark.skipif(not _pandoc_installed(), reason='pandoc is not installed')
def test_convert_readme_to_rst(tmpdir):
def _mock_pypandoc_convert_OSError(filename, format):
raise OSError('this would happen if pandoc were not installed!')
with tmpdir.as_cwd():
with pytest.raises(project.ProjectError):
project.convert_readme_to_rst()
open('README', 'w').close()
with pytest.raises(project.ProjectError):
project.convert_readme_to_rst()
os.remove('README')
open('README.rst', 'w').close()
with pytest.raises(project.ProjectError):
project.convert_readme_to_rst()
os.remove('README.rst')
with open('README.md', 'w') as readme_md:
readme_md.write('# heading')
project.convert_readme_to_rst()
assert helpers.regex_in_file(r'=======', 'README.rst') is True
os.remove('README.rst')
with mock.patch('pypandoc.convert', _mock_pypandoc_convert_OSError):
with pytest.raises(project.ProjectError):
project.convert_readme_to_rst()
def test_get_packaged_files(tmpdir):
with tmpdir.as_cwd():
assert project.get_packaged_files('package') == []
os.mkdir('dist')
open(os.path.join('dist', 'package-ver.s.ion+1.tar.gz'), 'w').close()
assert project.get_packaged_files('package') == ['dist/package-ver.s.ion+1.tar.gz']
open(os.path.join('dist', 'package-ver.s.ion+2.tar.gz'), 'w').close()
assert len(project.get_packaged_files('package')) == 2
def test_multiple_packaged_versions(tmpdir):
with tmpdir.as_cwd():
os.mkdir('dist')
open(os.path.join('dist', 'package-ver.s.ion+1.tar.gz'), 'w').close()
assert not project.multiple_packaged_versions('package')
open(os.path.join('dist', 'package-ver.s.ion+2.tar.gz'), 'w').close()
assert project.multiple_packaged_versions('package')
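

# Editor's sketch (assumption, restating the fallback behavior the
# monkeypatched tests above imply): _get_uploaded_versions() asks the
# warehouse JSON API first, falls back to pypicloud, and returns an empty
# list when neither index answers. The third positional argument mirrors
# the three-parameter signature the monkeypatched lambdas assume.
def _example_uploaded_versions(project_name, index_url):
    for lookup in (project._get_uploaded_versions_warehouse,
                   project._get_uploaded_versions_pypicloud):
        versions = lookup(project_name, index_url, None)
        if versions is not None:
            return versions
    return []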
|
{
"content_hash": "7deda49faec7411aa2b40f55efeb06cc",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 100,
"avg_line_length": 42.19230769230769,
"alnum_prop": 0.6510483135824977,
"repo_name": "ajk8/hatchery",
"id": "d0c8049184d4acb780159ae99a6813105dceeff9",
"size": "10970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57493"
}
],
"symlink_target": ""
}
|
from dataclasses import dataclass
from typing import List, Optional
import pytest
import simple_parsing
from simple_parsing import ConflictResolution, field
from . import TestSetup
from .example_use_cases import HParams, RunConfig, TrainConfig
@dataclass
class ClassA:
a: int = 1
@dataclass
class ClassB:
b: int = 2
@dataclass
class ClassC:
c: int = 3
@dataclass
class Container1(TestSetup):
v1: int = 0
class_a: ClassA = ClassA()
class_b: ClassB = ClassB()
@dataclass
class Container2(TestSetup):
v2: int = 0
class_a: ClassA = ClassA()
class_b: ClassB = ClassB()
@dataclass
class ContainerWithList(TestSetup):
list_of_class_c: List[ClassC] = field(default_factory=lambda: [ClassC()] * 2)
xfail_nesting_with_containers_isnt_supported_yet = pytest.mark.xfail(
reason="TODO: make sure this is how people would want to use this feature."
)
def test_nesting_no_args():
c1 = Container1.setup("")
assert c1.v1 == 0
assert c1.class_a.a == 1
assert c1.class_b.b == 2
def test_nesting_with_args():
c1 = Container1.setup("--a 123 --b 456 --v1 3")
assert c1.v1 == 3
assert c1.class_a.a == 123
assert c1.class_b.b == 456
@xfail_nesting_with_containers_isnt_supported_yet
def test_nesting_with_containers_no_args():
container = ContainerWithList.setup("")
assert len(container.list_of_class_c) == 2
@xfail_nesting_with_containers_isnt_supported_yet
def test_nesting_with_containers_with_args():
container = ContainerWithList.setup("--c 1 2")
assert len(container.list_of_class_c) == 2
c1, c2 = tuple(container.list_of_class_c)
assert c1.c == 1
assert isinstance(c1, ClassC)
assert c2.c == 2
assert isinstance(c2, ClassC)
@xfail_nesting_with_containers_isnt_supported_yet
def test_nesting_multiple_containers_with_args_separator():
container1, container2, container3 = ContainerWithList.setup_multiple(
3, "--c 1 2 --c 3 4 --c 5 6"
)
assert len(container1.list_of_class_c) == 2
c1, c2 = tuple(container1.list_of_class_c)
assert c1.c == 1
assert isinstance(c1, ClassC)
assert c2.c == 2
assert isinstance(c2, ClassC)
assert len(container2.list_of_class_c) == 2
c1, c2 = tuple(container2.list_of_class_c)
assert c1.c == 3
assert isinstance(c1, ClassC)
assert c2.c == 4
assert isinstance(c2, ClassC)
assert len(container3.list_of_class_c) == 2
c1, c2 = tuple(container3.list_of_class_c)
assert c1.c == 5
assert isinstance(c1, ClassC)
assert c2.c == 6
assert isinstance(c2, ClassC)
def test_train_config_example_no_args():
config = TrainConfig.setup("", conflict_resolution_mode=ConflictResolution.ALWAYS_MERGE)
assert isinstance(config.train, RunConfig)
import os
assert config.train.checkpoint_dir == os.path.join("train", "checkpoints")
assert isinstance(config.valid, RunConfig)
assert config.valid.checkpoint_dir == os.path.join("valid", "checkpoints")
print(TrainConfig.get_help_text())
def test_train_config_example_with_explicit_args():
config = TrainConfig.setup(
"--train_config.train.log_dir train "
"--train_config.train.hparams.batch_size 123 "
"--train_config.valid.log_dir valid "
"--train_config.valid.hparams.batch_size 456",
conflict_resolution_mode=ConflictResolution.EXPLICIT,
)
import os
assert isinstance(config.train, RunConfig)
assert config.train.checkpoint_dir == os.path.join("train", "checkpoints")
assert isinstance(config.train.hparams, HParams)
assert config.train.hparams.batch_size == 123
assert isinstance(config.valid, RunConfig)
assert config.valid.checkpoint_dir == os.path.join("valid", "checkpoints")
assert isinstance(config.valid.hparams, HParams)
assert config.valid.hparams.batch_size == 456
print(TrainConfig.get_help_text())
def test_nesting_defaults():
@dataclass
class A(TestSetup):
p: int
q: float
@dataclass
class B(TestSetup):
x: int
y: A
parser = simple_parsing.ArgumentParser()
default = B(x=3, y=A(p=4, q=0.1))
parser.add_arguments(B, dest="b", default=default)
assert parser.parse_args("").b == default
def test_nesting_defaults_with_optional():
@dataclass
class A(TestSetup):
p: int
q: float
@dataclass
class B(TestSetup):
x: int
y: Optional[A] = None # NOTE: The Optional annotation is causing trouble here.
    # This is because of the code we use to check for optional parameter groups. If we don't
    # detect any arguments from the group for the type `A`, then we just use None, because the field
# is marked as Optional. However, we should instead use the default value that is provided as
# an argument to `add_arguments`.
parser = simple_parsing.ArgumentParser()
default = B(x=3, y=A(p=4, q=0.1))
parser.add_arguments(B, dest="b", default=default)
assert parser.parse_args("").b == default
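

# Editor's sketch (not part of the original tests): the bare
# add_arguments/parse_args flow that test_nesting_defaults exercises,
# shown on its own.
def _example_parse_defaults():
    @dataclass
    class Options:
        x: int = 1
        name: str = "default"

    parser = simple_parsing.ArgumentParser()
    parser.add_arguments(Options, dest="options")
    # An empty argument string yields the dataclass defaults, exactly as
    # asserted in test_nesting_defaults above.
    return parser.parse_args("").options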
|
{
"content_hash": "853c53d8c475ee4c1878155ce1626bd4",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 99,
"avg_line_length": 27.55191256830601,
"alnum_prop": 0.6741372471241571,
"repo_name": "lebrice/SimpleParsing",
"id": "f9f90b2fc688eb71b1abf39d9ccce598a0f50622",
"size": "5042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/nesting/test_nesting_simple.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "607221"
}
],
"symlink_target": ""
}
|
import os
from zeus.config import ConfigManager
from zeus.common import FabricManager
from zeus.common import PasswordManager
from zeus.ubuntu import RepoManager
from fabric.api import parallel, roles, run, env
metadata = ConfigManager(os.environ["CONFIGFILE"])
passwords = PasswordManager(os.environ["PASSWORDCACHE"]).passwords
FabricManager.setup(metadata.roles_ports)
@parallel
@roles('openstack_rabbitmq')
def rabbit():
RepoManager.install("rabbitmq-server")
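    # The "change_password || add_user" pair keeps this task idempotent:
    # updating the password succeeds when the user already exists, and
    # only if that fails (user missing) is the user created.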
run("""
rabbitmqctl change_password openstack "%s" || \
rabbitmqctl add_user openstack "%s"
""" % (passwords["RABBIT_PASS"], passwords["RABBIT_PASS"]))
run("""
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
""")
|
{
"content_hash": "0dcce6dd2a865d7617f72e07f2cea95b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 66,
"avg_line_length": 22.838709677419356,
"alnum_prop": 0.7358757062146892,
"repo_name": "agabert/zeus",
"id": "0a5a042434a5519328796c5c781b0382b86ab360",
"size": "709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stages/rabbit/fabfile.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2541"
},
{
"name": "Python",
"bytes": "81125"
},
{
"name": "Shell",
"bytes": "1638"
}
],
"symlink_target": ""
}
|
"""
Views
"""
from aiohttp import web
async def get_worker(request):
"""
Test mongodb GET.
"""
db = request.app['db']
    cursor = db.worker.find({})
    docs = await cursor.to_list(None)
    # A Motor cursor object is always truthy, so emptiness has to be
    # checked on the fetched documents rather than on the cursor itself.
    if not docs:
        return web.HTTPNotFound(text='No worker documents found')
    resp = []
for d in docs:
resp += [{
'worker_id': d['workerID'],
'name': d['name'],
'team': d['team']
}]
return web.json_response(resp)
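

# Editor's sketch (hypothetical wiring, not part of the original module):
# the to_list() call above implies a Motor (async MongoDB) collection, so
# an application would typically be assembled along these lines. The
# 'testdb' database name is an assumption.
from motor.motor_asyncio import AsyncIOMotorClient


def example_init_app(mongo_uri='mongodb://localhost:27017'):
    app = web.Application()
    app['db'] = AsyncIOMotorClient(mongo_uri).testdb
    app.router.add_get('/workers', get_worker)
    return app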
|
{
"content_hash": "bce829e2fbc8b5aefcc402d96ee6d82f",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 53,
"avg_line_length": 19.2,
"alnum_prop": 0.5104166666666666,
"repo_name": "grtfou/aio-tree",
"id": "22895c45cc19a32b2799b1a2ef365ae208ceff7b",
"size": "559",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "website/box/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8271"
},
{
"name": "Shell",
"bytes": "617"
}
],
"symlink_target": ""
}
|
import unittest
from biokbase.catalog.Impl import Catalog
from catalog_test_util import CatalogTestUtil
class AdminMethodsTest(unittest.TestCase):
def test_is_admin(self):
madeUpName = 'asdfasdf'
userName = self.cUtil.user_ctx()['user_id']
adminName = self.cUtil.admin_ctx()['user_id']
with self.assertRaisesRegex(ValueError, 'Can only check on own admin status'):
self.assertEqual(self.catalog.is_admin(self.cUtil.user_ctx(), adminName)[0], 0)
self.assertEqual(self.catalog.is_admin(self.cUtil.anonymous_ctx(), madeUpName)[0], 0)
self.assertEqual(self.catalog.is_admin(self.cUtil.user_ctx(), userName)[0], 0)
self.assertEqual(self.catalog.is_admin(self.cUtil.admin_ctx(), adminName)[0], 1)
    # test with no token and user token (admin token gets tested in add_remove_developers)
def test_list_approved_developers(self):
with self.assertRaisesRegex(ValueError, 'Only Admin users can list approved developers.'):
self.assertEqual(self.catalog.list_approved_developers(self.cUtil.anonymous_ctx())[0], 0)
self.assertEqual(self.catalog.list_approved_developers(self.cUtil.user_ctx())[0], 0)
# assumes no developers have been added yet
def test_add_remove_developers(self):
# nothing there yet
devs = self.catalog.list_approved_developers(self.cUtil.admin_ctx())[0]
self.assertEqual(devs, [])
        is_approved = self.catalog.is_approved_developer(self.cUtil.anonymous_ctx(), [])[0]
self.assertEqual(is_approved, [])
is_approved = self.catalog.is_approved_developer(self.cUtil.anonymous_ctx(),
['somebody', 'otherperson'])[0]
self.assertEqual(is_approved, [0, 0])
# add somebody fails without admin user
with self.assertRaises(ValueError) as e:
self.catalog.approve_developer(self.cUtil.user_ctx(), 'alice')
self.assertEqual(str(e.exception),
'Only Admin users can approve or revoke developers.')
with self.assertRaises(ValueError) as e:
# should fail if we specified something empty
self.catalog.approve_developer(self.cUtil.admin_ctx(), ' ')
self.assertEqual(str(e.exception),
'No username provided')
# add some users
self.catalog.approve_developer(self.cUtil.admin_ctx(), 'eve')
self.catalog.approve_developer(self.cUtil.admin_ctx(), 'alice')
self.catalog.approve_developer(self.cUtil.admin_ctx(), 'bob')
self.catalog.approve_developer(self.cUtil.admin_ctx(),
'bob') # should be able to add again without error
devs = self.catalog.list_approved_developers(self.cUtil.admin_ctx())[0]
self.assertEqual(devs, ['alice', 'bob', 'eve']) # should be sorted
is_approved = self.catalog.is_approved_developer(self.cUtil.anonymous_ctx(),
['somebody', 'alice', 'otherperson',
'bob', 'bob'])[0]
self.assertEqual(is_approved, [0, 1, 0, 1, 1])
# remove some
with self.assertRaises(ValueError) as e:
# should fail, only admins can revoke users
self.catalog.revoke_developer(self.cUtil.user_ctx(), 'alice')
self.assertEqual(str(e.exception),
'Only Admin users can approve or revoke developers.')
with self.assertRaises(ValueError) as e:
# should fail if we misspelled a name
self.catalog.revoke_developer(self.cUtil.admin_ctx(), 'b0b')
self.assertEqual(str(e.exception),
'Cannot revoke "b0b", that developer was not found.')
with self.assertRaises(ValueError) as e:
# should fail if we specified something empty
self.catalog.revoke_developer(self.cUtil.admin_ctx(), ' ')
self.assertEqual(str(e.exception),
'No username provided')
self.catalog.revoke_developer(self.cUtil.admin_ctx(), 'alice')
# should have truncated list
devs = self.catalog.list_approved_developers(self.cUtil.admin_ctx())[0]
self.assertEqual(devs, ['bob', 'eve']) # should be sorted
is_approved = self.catalog.is_approved_developer(self.cUtil.anonymous_ctx(),
['somebody', 'alice', 'otherperson',
'bob', 'bob'])[0]
self.assertEqual(is_approved, [0, 0, 0, 1, 1])
# should block registration for non-developers
with self.assertRaises(ValueError) as e:
self.catalog.register_repo(self.cUtil.user_ctx(),
{'git_url': self.cUtil.get_test_repo_1()})
self.assertEqual(str(e.exception),
'You are not an approved developer. Contact us via http://kbase.us/contact-us/ to request approval.')
        # after the developer is added, registration should be allowed to start now
        # (give a bogus url so it finishes registration right away with an error)
self.catalog.approve_developer(self.cUtil.admin_ctx(), self.cUtil.test_user_1)
self.catalog.register_repo(self.cUtil.user_ctx(), {'git_url': self.cUtil.get_test_repo_1(),
'commit_hash': '0760f1927f74a'})
while True:
state = self.catalog.get_module_state(self.cUtil.anonymous_ctx(),
{'git_url': self.cUtil.get_test_repo_1()})[0]
if state['registration'] in ['complete', 'error']:
break
def test_migrate_module_to_new_git_url(self):
params = {
'module_name': "release_history",
'current_git_url': "https://github.com/kbaseIncubator/release_history",
'new_git_url': "https://github.com/kbase/release_history"
}
# first make sure we can find a module with this name and url
info = self.catalog.get_module_info(self.cUtil.anonymous_ctx(),
{'module_name': params['module_name'],
'git_url': params['current_git_url']})[0]
self.assertEqual(info['module_name'], params['module_name'])
self.assertEqual(info['git_url'], params['current_git_url'])
self.assertEqual(info['language'], 'python')
# next make sure we get an error if we are not an admin
with self.assertRaises(ValueError) as e:
self.catalog.migrate_module_to_new_git_url(self.cUtil.user_ctx(), params)
self.assertEqual(str(e.exception),
'Only Admin users can migrate module git urls.')
# if we are an admin, then it should work
self.catalog.migrate_module_to_new_git_url(self.cUtil.admin_ctx(), params)
# the old record should not be retrievable by that url anymore
with self.assertRaises(ValueError) as e:
self.catalog.get_module_info(self.cUtil.anonymous_ctx(),
{'module_name': params['module_name'],
'git_url': params['current_git_url']})[0]
self.assertEqual(str(e.exception),
'Operation failed - module/repo is not registered.')
# but the new url should work
info = self.catalog.get_module_info(self.cUtil.anonymous_ctx(),
{'module_name': params['module_name'],
'git_url': params['new_git_url']})[0]
self.assertEqual(info['module_name'], params['module_name'])
self.assertEqual(info['git_url'], params['new_git_url'])
self.assertEqual(info['language'], 'python')
# things should fail if we just try again
with self.assertRaises(ValueError) as e:
self.catalog.migrate_module_to_new_git_url(self.cUtil.admin_ctx(), params)
self.assertEqual(str(e.exception),
'Cannot migrate git_url, no module found with the given name and current url.')
# or if the new url is not valid
params['current_git_url'] = params['new_git_url']
params['new_git_url'] = "http:not_a_url"
with self.assertRaises(ValueError) as e:
self.catalog.migrate_module_to_new_git_url(self.cUtil.admin_ctx(), params)
self.assertEqual(str(e.exception),
'The new git url is not a valid URL.')
# but we should be able to switch back
params['new_git_url'] = "https://github.com/kbaseIncubator/release_history"
self.catalog.migrate_module_to_new_git_url(self.cUtil.admin_ctx(), params)
info = self.catalog.get_module_info(self.cUtil.anonymous_ctx(),
{'module_name': params['module_name']})[0]
self.assertEqual(info['module_name'], params['module_name'])
self.assertEqual(info['git_url'], params['new_git_url'])
self.assertEqual(info['language'], 'python')
# Method migrated to core registration test, as this needs a fresh registration to test with NMS
# def test_active_inactive_setting(self):
# # next make sure we get an error if we are not an admin
# params = { 'module_name':"release_history" }
# with self.assertRaises(ValueError) as e:
# self.catalog.set_to_active(self.cUtil.user_ctx(),params)
# self.assertEqual(str(e.exception),
# 'Only Admin users can set a module to be active/inactive.')
# with self.assertRaises(ValueError) as e:
# self.catalog.set_to_inactive(self.cUtil.user_ctx(),params)
# self.assertEqual(str(e.exception),
# 'Only Admin users can set a module to be active/inactive.')
# # release_history module is active, but it should be fine to set it again
# self.catalog.set_to_active(self.cUtil.admin_ctx(),params)
# state = self.catalog.get_module_state(self.cUtil.admin_ctx(),params)[0]
# self.assertEqual(state['active'],1)
# # make it inactive (calling twice should be ok and shouldn't change anything)
# self.catalog.set_to_inactive(self.cUtil.admin_ctx(),params)
# state = self.catalog.get_module_state(self.cUtil.user_ctx(),params)[0]
# self.assertEqual(state['active'],0)
# self.catalog.set_to_inactive(self.cUtil.admin_ctx(),params)
# state = self.catalog.get_module_state(self.cUtil.user_ctx(),params)[0]
# self.assertEqual(state['active'],0)
# # these still shouldn't work
# with self.assertRaises(ValueError) as e:
# self.catalog.set_to_active(self.cUtil.user_ctx(),params)
# self.assertEqual(str(e.exception),
# 'Only Admin users can set a module to be active/inactive.')
# with self.assertRaises(ValueError) as e:
# self.catalog.set_to_inactive(self.cUtil.user_ctx(),params)
# self.assertEqual(str(e.exception),
# 'Only Admin users can set a module to be active/inactive.')
# # make it active one more time for kicks
# self.catalog.set_to_active(self.cUtil.admin_ctx(),params)
# state = self.catalog.get_module_state(self.cUtil.anonymous_ctx(),params)[0]
# self.assertEqual(state['active'],1)
def test_set_registration_state(self):
# first make sure the state is what we expect
repoSelectionParam = {'module_name': 'registration_in_progress'}
state = self.catalog.get_module_state(self.cUtil.user_ctx(), repoSelectionParam)[0]
self.assertEqual(state['registration'], 'building: doing stuff')
self.assertEqual(state['error_message'], '')
# throw an error- users should not be able to update state
params = {'module_name': 'registration_in_progress', 'registration_state': 'complete'}
with self.assertRaises(ValueError) as e:
self.catalog.set_registration_state(self.cUtil.user_ctx(), params)
self.assertEqual(str(e.exception),
'You do not have permission to modify the registration state of this module/repo.')
# state should still be the same
state = self.catalog.get_module_state(self.cUtil.user_ctx(), repoSelectionParam)[0]
self.assertEqual(state['registration'], 'building: doing stuff')
self.assertEqual(state['error_message'], '')
# admin can update the registration state to complete
self.catalog.set_registration_state(self.cUtil.admin_ctx(), params)
state = self.catalog.get_module_state(self.cUtil.user_ctx(), repoSelectionParam)[0]
self.assertEqual(state['registration'], 'complete')
self.assertEqual(state['error_message'], '')
# admin cannot set the state to error without an error message
params = {'module_name': 'registration_in_progress', 'registration_state': 'error'}
with self.assertRaises(ValueError) as e:
self.catalog.set_registration_state(self.cUtil.admin_ctx(), params)
self.assertEqual(str(e.exception),
'Update failed - if state is "error", you must also set an "error_message".')
state = self.catalog.get_module_state(self.cUtil.user_ctx(), repoSelectionParam)[0]
self.assertEqual(state['registration'], 'complete')
self.assertEqual(state['error_message'], '')
params = {'module_name': 'registration_in_progress', 'registration_state': 'error',
'error_message': 'something'}
self.catalog.set_registration_state(self.cUtil.admin_ctx(), params)
state = self.catalog.get_module_state(self.cUtil.user_ctx(), repoSelectionParam)[0]
self.assertEqual(state['registration'], 'error')
self.assertEqual(state['error_message'], 'something')
@classmethod
def setUpClass(cls):
print('++++++++++++ RUNNING admin_methods_test.py +++++++++++')
cls.cUtil = CatalogTestUtil('.') # TODO: pass in test directory from outside
cls.cUtil.setUp()
cls.catalog = Catalog(cls.cUtil.getCatalogConfig())
print('ready')
@classmethod
def tearDownClass(cls):
cls.cUtil.tearDown()
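
# Editor's note (sketch): these tests are normally driven by the project's
# test harness; a direct invocation would be the standard:
if __name__ == '__main__':
    unittest.main()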
|
{
"content_hash": "e54a54d3093b348f86bd8b0bf1426bc0",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 127,
"avg_line_length": 54.734848484848484,
"alnum_prop": 0.6060899653979239,
"repo_name": "kbase/catalog",
"id": "aab3a69e04de654c665bed2a547064301235049a",
"size": "14450",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/admin_methods_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1280"
},
{
"name": "Java",
"bytes": "269378"
},
{
"name": "JavaScript",
"bytes": "40212"
},
{
"name": "Makefile",
"bytes": "3381"
},
{
"name": "Perl",
"bytes": "205528"
},
{
"name": "Python",
"bytes": "763758"
},
{
"name": "Ruby",
"bytes": "29674"
},
{
"name": "Shell",
"bytes": "3953"
}
],
"symlink_target": ""
}
|
__doc__ = """
Generic Taskmaster module for the SCons build engine.
This module contains the primary interface(s) between a wrapping user
interface and the SCons build engine. There are two key classes here:
Taskmaster
This is the main engine for walking the dependency graph and
calling things to decide what does or doesn't need to be built.
Task
This is the base class for allowing a wrapping interface to
decide what does or doesn't actually need to be done. The
intention is for a wrapping interface to subclass this as
appropriate for different types of behavior it may need.
The canonical example is the SCons native Python interface,
which has Task subclasses that handle its specific behavior,
like printing "`foo' is up to date" when a top-level target
doesn't need to be built, and handling the -c option by removing
targets as its "build" action. There is also a separate subclass
for suppressing this output when the -q option is used.
The Taskmaster instantiates a Task object for each (set of)
target(s) that it decides need to be evaluated and/or built.
"""
__revision__ = "src/engine/SCons/Taskmaster.py 2014/01/04 01:12:18 root"
from itertools import chain
import operator
import sys
import traceback
import SCons.Errors
import SCons.Node
import SCons.Warnings
StateString = SCons.Node.StateString
NODE_NO_STATE = SCons.Node.no_state
NODE_PENDING = SCons.Node.pending
NODE_EXECUTING = SCons.Node.executing
NODE_UP_TO_DATE = SCons.Node.up_to_date
NODE_EXECUTED = SCons.Node.executed
NODE_FAILED = SCons.Node.failed
print_prepare = 0 # set by option --debug=prepare
# A subsystem for recording stats about how different Nodes are handled by
# the main Taskmaster loop. There's no external control here (no need for
# a --debug= option); enable it by changing the value of CollectStats.
CollectStats = None
class Stats(object):
"""
A simple class for holding statistics about the disposition of a
Node by the Taskmaster. If we're collecting statistics, each Node
processed by the Taskmaster gets one of these attached, in which case
the Taskmaster records its decision each time it processes the Node.
(Ideally, that's just once per Node.)
"""
def __init__(self):
"""
Instantiates a Taskmaster.Stats object, initializing all
appropriate counters to zero.
"""
self.considered = 0
self.already_handled = 0
self.problem = 0
self.child_failed = 0
self.not_built = 0
self.side_effects = 0
self.build = 0
StatsNodes = []
fmt = "%(considered)3d "\
"%(already_handled)3d " \
"%(problem)3d " \
"%(child_failed)3d " \
"%(not_built)3d " \
"%(side_effects)3d " \
"%(build)3d "
def dump_stats():
for n in sorted(StatsNodes, key=lambda a: str(a)):
print (fmt % n.stats.__dict__) + str(n)
class Task(object):
"""
Default SCons build engine task.
This controls the interaction of the actual building of node
and the rest of the engine.
This is expected to handle all of the normally-customizable
aspects of controlling a build, so any given application
*should* be able to do what it wants by sub-classing this
class and overriding methods as appropriate. If an application
    needs to customize something by sub-classing Taskmaster (or
some other build engine class), we should first try to migrate
that functionality into this class.
Note that it's generally a good idea for sub-classes to call
these methods explicitly to update state, etc., rather than
roll their own interaction with Taskmaster from scratch.
"""
def __init__(self, tm, targets, top, node):
self.tm = tm
self.targets = targets
self.top = top
self.node = node
self.exc_clear()
def trace_message(self, method, node, description='node'):
fmt = '%-20s %s %s\n'
return fmt % (method + ':', description, self.tm.trace_node(node))
def display(self, message):
"""
Hook to allow the calling interface to display a message.
This hook gets called as part of preparing a task for execution
(that is, a Node to be built). As part of figuring out what Node
        should be built next, the actual target list may be altered,
along with a message describing the alteration. The calling
interface can subclass Task and provide a concrete implementation
of this method to see those messages.
"""
pass
def prepare(self):
"""
Called just before the task is executed.
This is mainly intended to give the target Nodes a chance to
unlink underlying files and make all necessary directories before
the Action is actually called to build the targets.
"""
global print_prepare
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.prepare()', self.node))
# Now that it's the appropriate time, give the TaskMaster a
# chance to raise any exceptions it encountered while preparing
# this task.
self.exception_raise()
if self.tm.message:
self.display(self.tm.message)
self.tm.message = None
# Let the targets take care of any necessary preparations.
# This includes verifying that all of the necessary sources
# and dependencies exist, removing the target file(s), etc.
#
# As of April 2008, the get_executor().prepare() method makes
# sure that all of the aggregate sources necessary to build this
# Task's target(s) exist in one up-front check. The individual
# target t.prepare() methods check that each target's explicit
# or implicit dependencies exists, and also initialize the
# .sconsign info.
executor = self.targets[0].get_executor()
executor.prepare()
for t in executor.get_action_targets():
if print_prepare:
print "Preparing target %s..."%t
for s in t.side_effects:
print "...with side-effect %s..."%s
t.prepare()
for s in t.side_effects:
if print_prepare:
print "...Preparing side-effect %s..."%s
s.prepare()
def get_target(self):
"""Fetch the target being built or updated by this task.
"""
return self.node
def needs_execute(self):
# TODO(deprecate): "return True" is the old default behavior;
# change it to NotImplementedError (after running through the
# Deprecation Cycle) so the desired behavior is explicitly
# determined by which concrete subclass is used.
#raise NotImplementedError
msg = ('Taskmaster.Task is an abstract base class; instead of\n'
'\tusing it directly, '
'derive from it and override the abstract methods.')
SCons.Warnings.warn(SCons.Warnings.TaskmasterNeedsExecuteWarning, msg)
return True
def execute(self):
"""
Called to execute the task.
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff in
prepare(), executed() or failed().
"""
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.execute()', self.node))
try:
cached_targets = []
for t in self.targets:
if not t.retrieve_from_cache():
break
cached_targets.append(t)
if len(cached_targets) < len(self.targets):
# Remove targets before building. It's possible that we
# partially retrieved targets from the cache, leaving
# them in read-only mode. That might cause the command
# to fail.
#
for t in cached_targets:
try:
t.fs.unlink(t.path)
except (IOError, OSError):
pass
self.targets[0].build()
else:
for t in cached_targets:
t.cached = 1
except SystemExit:
exc_value = sys.exc_info()[1]
raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code)
except SCons.Errors.UserError:
raise
except SCons.Errors.BuildError:
raise
except Exception, e:
buildError = SCons.Errors.convert_to_BuildError(e)
buildError.node = self.targets[0]
buildError.exc_info = sys.exc_info()
raise buildError
def executed_without_callbacks(self):
"""
Called when the task has been successfully executed
and the Taskmaster instance doesn't want to call
the Node's callback methods.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.executed_without_callbacks()',
self.node))
for t in self.targets:
if t.get_state() == NODE_EXECUTING:
for side_effect in t.side_effects:
side_effect.set_state(NODE_NO_STATE)
t.set_state(NODE_EXECUTED)
def executed_with_callbacks(self):
"""
Called when the task has been successfully executed and
the Taskmaster instance wants to call the Node's callback
methods.
This may have been a do-nothing operation (to preserve build
order), so we must check the node's state before deciding whether
it was "built", in which case we call the appropriate Node method.
In any event, we always call "visited()", which will handle any
post-visit actions that must take place regardless of whether
or not the target was an actual built target or a source Node.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.executed_with_callbacks()',
self.node))
for t in self.targets:
if t.get_state() == NODE_EXECUTING:
for side_effect in t.side_effects:
side_effect.set_state(NODE_NO_STATE)
t.set_state(NODE_EXECUTED)
if not t.cached:
t.push_to_cache()
t.built()
t.visited()
executed = executed_with_callbacks
def failed(self):
"""
Default action when a task fails: stop the build.
Note: Although this function is normally invoked on nodes in
the executing state, it might also be invoked on up-to-date
nodes when using Configure().
"""
self.fail_stop()
def fail_stop(self):
"""
Explicit stop-the-build failure.
This sets failure status on the target nodes and all of
their dependent parent nodes.
Note: Although this function is normally invoked on nodes in
the executing state, it might also be invoked on up-to-date
nodes when using Configure().
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.failed_stop()', self.node))
# Invoke will_not_build() to clean-up the pending children
# list.
self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
# Tell the taskmaster to not start any new tasks
self.tm.stop()
# We're stopping because of a build failure, but give the
# calling Task class a chance to postprocess() the top-level
# target under which the build failure occurred.
self.targets = [self.tm.current_top]
self.top = 1
def fail_continue(self):
"""
Explicit continue-the-build failure.
This sets failure status on the target nodes and all of
their dependent parent nodes.
Note: Although this function is normally invoked on nodes in
the executing state, it might also be invoked on up-to-date
nodes when using Configure().
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.failed_continue()', self.node))
self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
def make_ready_all(self):
"""
Marks all targets in a task ready for execution.
This is used when the interface needs every target Node to be
visited--the canonical example being the "scons -c" option.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.make_ready_all()', self.node))
self.out_of_date = self.targets[:]
for t in self.targets:
t.disambiguate().set_state(NODE_EXECUTING)
for s in t.side_effects:
# add disambiguate here to mirror the call on targets above
s.disambiguate().set_state(NODE_EXECUTING)
def make_ready_current(self):
"""
Marks all targets in a task ready for execution if any target
is not current.
This is the default behavior for building only what's necessary.
"""
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.make_ready_current()',
self.node))
self.out_of_date = []
needs_executing = False
for t in self.targets:
try:
t.disambiguate().make_ready()
is_up_to_date = not t.has_builder() or \
(not t.always_build and t.is_up_to_date())
except EnvironmentError, e:
raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)
if not is_up_to_date:
self.out_of_date.append(t)
needs_executing = True
if needs_executing:
for t in self.targets:
t.set_state(NODE_EXECUTING)
for s in t.side_effects:
# add disambiguate here to mirror the call on targets in first loop above
s.disambiguate().set_state(NODE_EXECUTING)
else:
for t in self.targets:
# We must invoke visited() to ensure that the node
# information has been computed before allowing the
# parent nodes to execute. (That could occur in a
# parallel build...)
t.visited()
t.set_state(NODE_UP_TO_DATE)
make_ready = make_ready_current
def postprocess(self):
"""
Post-processes a task after it's been executed.
This examines all the targets just built (or not, we don't care
if the build was successful, or even if there was no build
because everything was up-to-date) to see if they have any
waiting parent Nodes, or Nodes waiting on a common side effect,
that can be put back on the candidates list.
"""
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.postprocess()', self.node))
# We may have built multiple targets, some of which may have
# common parents waiting for this build. Count up how many
# targets each parent was waiting for so we can subtract the
# values later, and so we *don't* put waiting side-effect Nodes
# back on the candidates list if the Node is also a waiting
# parent.
targets = set(self.targets)
pending_children = self.tm.pending_children
parents = {}
for t in targets:
# A node can only be in the pending_children set if it has
# some waiting_parents.
if t.waiting_parents:
if T: T.write(self.trace_message(u'Task.postprocess()',
t,
'removing'))
pending_children.discard(t)
for p in t.waiting_parents:
parents[p] = parents.get(p, 0) + 1
for t in targets:
for s in t.side_effects:
if s.get_state() == NODE_EXECUTING:
s.set_state(NODE_NO_STATE)
for p in s.waiting_parents:
parents[p] = parents.get(p, 0) + 1
for p in s.waiting_s_e:
if p.ref_count == 0:
self.tm.candidates.append(p)
for p, subtract in parents.items():
p.ref_count = p.ref_count - subtract
if T: T.write(self.trace_message(u'Task.postprocess()',
p,
'adjusted parent ref count'))
if p.ref_count == 0:
self.tm.candidates.append(p)
for t in targets:
t.postprocess()
# Exception handling subsystem.
#
# Exceptions that occur while walking the DAG or examining Nodes
# must be raised, but must be raised at an appropriate time and in
# a controlled manner so we can, if necessary, recover gracefully,
# possibly write out signature information for Nodes we've updated,
# etc. This is done by having the Taskmaster tell us about the
    # exception, and letting the Task raise it for us at the appropriate time.
def exc_info(self):
"""
Returns info about a recorded exception.
"""
return self.exception
def exc_clear(self):
"""
Clears any recorded exception.
This also changes the "exception_raise" attribute to point
to the appropriate do-nothing method.
"""
self.exception = (None, None, None)
self.exception_raise = self._no_exception_to_raise
def exception_set(self, exception=None):
"""
Records an exception to be raised at the appropriate time.
This also changes the "exception_raise" attribute to point
        to the method that will, in fact, raise the exception.
"""
if not exception:
exception = sys.exc_info()
self.exception = exception
self.exception_raise = self._exception_raise
def _no_exception_to_raise(self):
pass
def _exception_raise(self):
"""
Raises a pending exception that was recorded while getting a
Task ready for execution.
"""
exc = self.exc_info()[:]
try:
exc_type, exc_value, exc_traceback = exc
except ValueError:
exc_type, exc_value = exc
exc_traceback = None
raise exc_type, exc_value, exc_traceback
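
# Editor's sketch (hypothetical; not part of the original module): the
# kind of wrapping-interface subclass the module docstring describes,
# here silencing per-target messages the way the -q option does.
class _ExampleQuietTask(Task):
    def display(self, message):
        # Suppress "`foo' is up to date" style output.
        pass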
class AlwaysTask(Task):
def needs_execute(self):
"""
Always returns True (indicating this Task should always
be executed).
Subclasses that need this behavior (as opposed to the default
of only executing Nodes that are out of date w.r.t. their
dependencies) can use this as follows:
class MyTaskSubclass(SCons.Taskmaster.Task):
            needs_execute = SCons.Taskmaster.AlwaysTask.needs_execute
"""
return True
class OutOfDateTask(Task):
def needs_execute(self):
"""
Returns True (indicating this Task should be executed) if this
Task's target state indicates it needs executing, which has
already been determined by an earlier up-to-date check.
"""
return self.targets[0].get_state() == SCons.Node.executing
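
# find_cycle() below performs a depth-first walk over waiting_parents
# links from the node on top of `stack`, returning the stack as one
# concrete cycle once the walk reaches the starting node again, or None
# if no cycle is reachable; `visited` keeps the search from re-expanding
# nodes.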
def find_cycle(stack, visited):
if stack[-1] in visited:
return None
visited.add(stack[-1])
for n in stack[-1].waiting_parents:
stack.append(n)
if stack[0] == stack[-1]:
return stack
if find_cycle(stack, visited):
return stack
stack.pop()
return None
class Taskmaster(object):
"""
The Taskmaster for walking the dependency DAG.
"""
def __init__(self, targets=[], tasker=None, order=None, trace=None):
self.original_top = targets
self.top_targets_left = targets[:]
self.top_targets_left.reverse()
self.candidates = []
if tasker is None:
tasker = OutOfDateTask
self.tasker = tasker
if not order:
order = lambda l: l
self.order = order
self.message = None
self.trace = trace
self.next_candidate = self.find_next_candidate
self.pending_children = set()
def find_next_candidate(self):
"""
Returns the next candidate Node for (potential) evaluation.
The candidate list (really a stack) initially consists of all of
the top-level (command line) targets provided when the Taskmaster
was initialized. While we walk the DAG, visiting Nodes, all the
children that haven't finished processing get pushed on to the
candidate list. Each child can then be popped and examined in
turn for whether *their* children are all up-to-date, in which
case a Task will be created for their actual evaluation and
potential building.
Here is where we also allow candidate Nodes to alter the list of
Nodes that should be examined. This is used, for example, when
invoking SCons in a source directory. A source directory Node can
return its corresponding build directory Node, essentially saying,
"Hey, you really need to build this thing over here instead."
"""
try:
return self.candidates.pop()
except IndexError:
pass
try:
node = self.top_targets_left.pop()
except IndexError:
return None
self.current_top = node
alt, message = node.alter_targets()
if alt:
self.message = message
self.candidates.append(node)
self.candidates.extend(self.order(alt))
node = self.candidates.pop()
return node
def no_next_candidate(self):
"""
Stops Taskmaster processing by not returning a next candidate.
Note that we have to clean-up the Taskmaster candidate list
because the cycle detection depends on the fact all nodes have
been processed somehow.
"""
while self.candidates:
candidates = self.candidates
self.candidates = []
self.will_not_build(candidates)
return None
def _validate_pending_children(self):
"""
Validate the content of the pending_children set. Assert if an
internal error is found.
This function is used strictly for debugging the taskmaster by
checking that no invariants are violated. It is not used in
normal operation.
The pending_children set is used to detect cycles in the
dependency graph. We call a "pending child" a child that is
found in the "pending" state when checking the dependencies of
its parent node.
        A pending child can occur when the Taskmaster completes a loop
        through a cycle. For example, let's imagine a graph made of
        three nodes (A, B and C) forming a cycle. The evaluation starts
        at node A. The Taskmaster first considers whether node A's
        child B is up-to-date. Then, recursively, node B needs to
        check whether node C is up-to-date. This leaves us with a
        dependency graph looking like:
                                 Next candidate \
                                                 \
        Node A (Pending) --> Node B (Pending) --> Node C (NoState)
                ^                                        |
                |                                        |
                +----------------------------------------+
Now, when the Taskmaster examines the Node C's child Node A,
it finds that Node A is in the "pending" state. Therefore,
Node A is a pending child of node C.
Pending children indicate that the Taskmaster has potentially
looped back through a cycle. We say potentially because it could
also occur when a DAG is evaluated in parallel. For example,
consider the following graph:
Node A (Pending) --> Node B(Pending) --> Node C (Pending) --> ...
| ^
| |
+----------> Node D (NoState) --------+
/
Next candidate /
The Taskmaster first evaluates the nodes A, B, and C and
starts building some children of node C. Assuming that the
maximum parallel level has not been reached, the Taskmaster
will examine Node D. It will find that Node C is a pending
child of Node D.
In summary, evaluating a graph with a cycle will always
involve a pending child at one point. A pending child might
indicate either a cycle or a diamond-shaped DAG. Only a
fraction of the nodes ends up being a "pending child" of
another node. This keeps the pending_children set small in
practice.
We can differentiate between the two cases if we wait until
the end of the build. At this point, all the pending children
nodes due to a diamond-shaped DAG will have been properly
built (or will have failed to build). But, the pending
children involved in a cycle will still be in the pending
state.
The taskmaster removes nodes from the pending_children set as
soon as a pending child moves out of the pending
state. This also helps to keep the pending_children set small.
"""
for n in self.pending_children:
assert n.state in (NODE_PENDING, NODE_EXECUTING), \
(str(n), StateString[n.state])
assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents))
for p in n.waiting_parents:
assert p.ref_count > 0, (str(n), str(p), p.ref_count)
def trace_message(self, message):
return 'Taskmaster: %s\n' % message
def trace_node(self, node):
return '<%-10s %-3s %s>' % (StateString[node.get_state()],
node.ref_count,
repr(str(node)))
def _find_next_ready_node(self):
"""
Finds the next node that is ready to be built.
This is *the* main guts of the DAG walk. We loop through the
list of candidates, looking for something that has no un-built
children (i.e., that is a leaf Node or has dependencies that are
all leaf Nodes or up-to-date). Candidate Nodes are re-scanned
(both the target Node itself and its sources, which are always
scanned in the context of a given target) to discover implicit
dependencies. A Node that must wait for some children to be
built will be put back on the candidates list after the children
have finished building. A Node that has been put back on the
candidates list in this way may have itself (or its sources)
re-scanned, in order to handle generated header files (e.g.) and
the implicit dependencies therein.
Note that this method does not do any signature calculation or
up-to-date check itself. All of that is handled by the Task
class. This is purely concerned with the dependency graph walk.
"""
self.ready_exc = None
T = self.trace
if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate'))
while True:
node = self.next_candidate()
if node is None:
if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
return None
node = node.disambiguate()
state = node.get_state()
# For debugging only:
#
# try:
# self._validate_pending_children()
# except:
# self.ready_exc = sys.exc_info()
# return node
if CollectStats:
if not hasattr(node, 'stats'):
node.stats = Stats()
StatsNodes.append(node)
S = node.stats
S.considered = S.considered + 1
else:
S = None
if T: T.write(self.trace_message(u' Considering node %s and its children:' % self.trace_node(node)))
if state == NODE_NO_STATE:
# Mark this node as being on the execution stack:
node.set_state(NODE_PENDING)
elif state > NODE_PENDING:
# Skip this node if it has already been evaluated:
if S: S.already_handled = S.already_handled + 1
if T: T.write(self.trace_message(u' already handled (executed)'))
continue
executor = node.get_executor()
try:
children = executor.get_all_children()
except SystemExit:
exc_value = sys.exc_info()[1]
e = SCons.Errors.ExplicitExit(node, exc_value.code)
self.ready_exc = (SCons.Errors.ExplicitExit, e)
if T: T.write(self.trace_message(' SystemExit'))
return node
except Exception, e:
# We had a problem just trying to figure out the
# children (like a child couldn't be linked in to a
# VariantDir, or a Scanner threw something). Arrange to
# raise the exception when the Task is "executed."
self.ready_exc = sys.exc_info()
if S: S.problem = S.problem + 1
if T: T.write(self.trace_message(' exception %s while scanning children.\n' % e))
return node
children_not_visited = []
children_pending = set()
children_not_ready = []
children_failed = False
for child in chain(executor.get_all_prerequisites(), children):
childstate = child.get_state()
if T: T.write(self.trace_message(u' ' + self.trace_node(child)))
if childstate == NODE_NO_STATE:
children_not_visited.append(child)
elif childstate == NODE_PENDING:
children_pending.add(child)
elif childstate == NODE_FAILED:
children_failed = True
if childstate <= NODE_EXECUTING:
children_not_ready.append(child)
# These nodes have not even been visited yet. Add
# them to the list so that on some next pass we can
# take a stab at evaluating them (or their children).
children_not_visited.reverse()
self.candidates.extend(self.order(children_not_visited))
#if T and children_not_visited:
# T.write(self.trace_message(' adding to candidates: %s' % map(str, children_not_visited)))
# T.write(self.trace_message(' candidates now: %s\n' % map(str, self.candidates)))
# Skip this node if any of its children have failed.
#
# This catches the case where we're descending a top-level
# target and one of our children failed while trying to be
# built by a *previous* descent of an earlier top-level
# target.
#
# It can also occur if a node is reused in multiple
# targets. The first descent occurs through one of the
# targets; the next descent occurs through the other target.
#
# Note that we can only have failed_children if the
# --keep-going flag was used, because without it the build
# will stop before diving into the other branch.
#
# Note that even if one of the children fails, the other
# children have still been added to the list of candidate
# nodes to keep on building (--keep-going).
if children_failed:
for n in executor.get_action_targets():
n.set_state(NODE_FAILED)
if S: S.child_failed = S.child_failed + 1
if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
continue
if children_not_ready:
for child in children_not_ready:
# We're waiting on one or more derived targets
# that have not yet finished building.
if S: S.not_built = S.not_built + 1
# Add this node to the waiting parents lists of
# anything we're waiting on, with a reference
# count so we can be put back on the list for
# re-evaluation when they've all finished.
node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
if T: T.write(self.trace_message(u' adjusted ref count: %s, child %s' %
(self.trace_node(node), repr(str(child)))))
if T:
for pc in children_pending:
T.write(self.trace_message(' adding %s to the pending children set\n' %
self.trace_node(pc)))
self.pending_children = self.pending_children | children_pending
continue
# Skip this node if it has side-effects that are
# currently being built:
wait_side_effects = False
for se in executor.get_action_side_effects():
if se.get_state() == NODE_EXECUTING:
se.add_to_waiting_s_e(node)
wait_side_effects = True
if wait_side_effects:
if S: S.side_effects = S.side_effects + 1
continue
# The default when we've gotten through all of the checks above:
# this node is ready to be built.
if S: S.build = S.build + 1
if T: T.write(self.trace_message(u'Evaluating %s\n' %
self.trace_node(node)))
# For debugging only:
#
# try:
# self._validate_pending_children()
# except:
# self.ready_exc = sys.exc_info()
# return node
return node
return None
def next_task(self):
"""
Returns the next task to be executed.
This simply asks for the next Node to be evaluated, and then wraps
it in the specific Task subclass with which we were initialized.
"""
node = self._find_next_ready_node()
if node is None:
return None
tlist = node.get_executor().get_all_targets()
task = self.tasker(self, tlist, node in self.original_top, node)
try:
task.make_ready()
except:
# We had a problem just trying to get this task ready (like
# a child couldn't be linked in to a VariantDir when deciding
# whether this node is current). Arrange to raise the
# exception when the Task is "executed."
self.ready_exc = sys.exc_info()
if self.ready_exc:
task.exception_set(self.ready_exc)
self.ready_exc = None
return task
def will_not_build(self, nodes, node_func=lambda n: None):
"""
Perform clean-up for nodes that will never be built. Invokes
a user-defined function on all of these nodes (including all
of their parents).
"""
T = self.trace
pending_children = self.pending_children
to_visit = set(nodes)
pending_children = pending_children - to_visit
if T:
for n in nodes:
T.write(self.trace_message(' removing node %s from the pending children set\n' %
self.trace_node(n)))
try:
while len(to_visit):
node = to_visit.pop()
node_func(node)
# Prune recursion by flushing the waiting children
# list immediately.
parents = node.waiting_parents
node.waiting_parents = set()
to_visit = to_visit | parents
pending_children = pending_children - parents
for p in parents:
p.ref_count = p.ref_count - 1
if T: T.write(self.trace_message(' removing parent %s from the pending children set\n' %
self.trace_node(p)))
except KeyError:
# The container to_visit has been emptied.
pass
# We have to stick the pending_children set back into the
# taskmaster because Python 1.5.2 compatibility does not
# allow us to use in-place updates.
self.pending_children = pending_children
def stop(self):
"""
Stops the current build completely.
"""
self.next_candidate = self.no_next_candidate
def cleanup(self):
"""
Check for dependency cycles.
"""
if not self.pending_children:
return
nclist = [(n, find_cycle([n], set())) for n in self.pending_children]
genuine_cycles = [
node for node,cycle in nclist
if cycle or node.get_state() != NODE_EXECUTED
]
if not genuine_cycles:
# All of the "cycles" found were single nodes in EXECUTED state,
# which is to say, they really weren't cycles. Just return.
return
desc = 'Found dependency cycle(s):\n'
for node, cycle in nclist:
if cycle:
desc = desc + " " + " -> ".join(map(str, cycle)) + "\n"
else:
desc = desc + \
" Internal Error: no cycle found for node %s (%s) in state %s\n" % \
(node, repr(node), StateString[node.get_state()])
raise SCons.Errors.UserError(desc)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "1977964b92cfbe074ef6772f4fb86a1a",
"timestamp": "",
"source": "github",
"line_count": 1010,
"max_line_length": 115,
"avg_line_length": 38.353465346534655,
"alnum_prop": 0.5744895061569043,
"repo_name": "Distrotech/scons",
"id": "be4b599195d19d95e12debb5dee02f041dde0c16",
"size": "39913",
"binary": false,
"copies": "2",
"ref": "refs/heads/distrotech-scons",
"path": "build/scons/engine/SCons/Taskmaster.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259"
},
{
"name": "JavaScript",
"bytes": "17316"
},
{
"name": "Perl",
"bytes": "45214"
},
{
"name": "Python",
"bytes": "12517068"
},
{
"name": "Shell",
"bytes": "20589"
}
],
"symlink_target": ""
}
|
from django.db.backends.mysql.base import *
from django.db.backends.mysql.base import DatabaseWrapper as MySQLDatabaseWrapper
from django.db.backends.mysql.base import DatabaseOperations as MySQLDatabaseOperations
class DatabaseOperations(MySQLDatabaseOperations):
def last_executed_query(self, cursor, sql, params):
# With MySQLdb, cursor objects have an (undocumented) "_last_executed"
# attribute where the exact query sent to the database is saved.
# See MySQLdb/cursors.py in the source distribution.
return cursor._last_executed
class DatabaseWrapper(MySQLDatabaseWrapper):
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.ops = DatabaseOperations()
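# Usage sketch: the dotted ENGINE path below is an assumption inferred from
# this package's layout. With it in place Django instantiates the
# DatabaseWrapper above, after which the exact query MySQLdb sent to the
# server can be read back:
#
#   DATABASES = {
#       'default': {
#           'ENGINE': 'mysql_echo.backend',  # hypothetical dotted path
#           'NAME': 'mydb',
#       },
#   }
#
#   from django.db import connection
#   cursor = connection.cursor()
#   cursor.execute("SELECT %s", [1])
#   connection.ops.last_executed_query(cursor, None, None)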
|
{
"content_hash": "84207280587cff25844132765c7cc527",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 87,
"avg_line_length": 45.11764705882353,
"alnum_prop": 0.7366362451108214,
"repo_name": "350dotorg/akcrm",
"id": "097e169b4431ac64f9c695f0bfb96c1eacdc9da2",
"size": "767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysql_echo/backend/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "529878"
},
{
"name": "JavaScript",
"bytes": "757124"
},
{
"name": "Python",
"bytes": "101919"
}
],
"symlink_target": ""
}
|
"""The Manager orchestrates the overall process of running layout tests.
This includes finding tests to run, reading the test expectations,
starting the required helper servers, deciding the order and way to
run the tests, retrying failed tests, and collecting the test results,
including crash logs, and mismatches with expectations.
The Manager object has a constructor and one main method called run.
"""
import json
import logging
import random
import sys
import time
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput
from webkitpy.tool import grammar
_log = logging.getLogger(__name__)
# Builder base URL where we have the archived test results.
BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
TestExpectations = test_expectations.TestExpectations
class Manager(object):
"""A class for managing running a series of layout tests."""
def __init__(self, port, options, printer):
"""Initialize test runner data structures.
Args:
port: An object implementing platform-specific functionality.
options: An options argument which contains command line options.
printer: A Printer object to record updates to.
"""
self._port = port
self._filesystem = port.host.filesystem
self._options = options
self._printer = printer
self._expectations = None
self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
self.INSPECTOR_SUBDIR = 'inspector' + port.TEST_PATH_SEPARATOR
self.PERF_SUBDIR = 'perf'
self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
self.ARCHIVED_RESULTS_LIMIT = 25
self._http_server_started = False
self._wptserve_started = False
self._websockets_server_started = False
self._results_directory = self._port.results_directory()
self._finder = LayoutTestFinder(self._port, self._options)
self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
def run(self, args):
"""Run the tests and return a RunDetails object with the results."""
start_time = time.time()
self._printer.write_update("Collecting tests ...")
running_all_tests = False
try:
paths, test_names, running_all_tests = self._collect_tests(args)
except IOError:
# This is raised if --test-list doesn't exist
return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
self._printer.write_update("Parsing expectations ...")
self._expectations = test_expectations.TestExpectations(self._port, test_names)
tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
# Check to make sure we're not skipping every test.
if not tests_to_run:
_log.critical('No tests to run.')
return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
exit_code = self._set_up_run(tests_to_run)
if exit_code:
return test_run_results.RunDetails(exit_code=exit_code)
# Don't retry failures if an explicit list of tests was passed in.
if self._options.retry_failures is None:
should_retry_failures = len(paths) < len(test_names)
else:
should_retry_failures = self._options.retry_failures
enabled_pixel_tests_in_retry = False
try:
self._start_servers(tests_to_run)
num_workers = self._port.num_workers(int(self._options.child_processes))
initial_results = self._run_tests(
tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
num_workers)
# Don't retry failures when interrupted by user or failures limit exception.
should_retry_failures = should_retry_failures and not (
initial_results.interrupted or initial_results.keyboard_interrupted)
tests_to_retry = self._tests_to_retry(initial_results)
all_retry_results = []
if should_retry_failures and tests_to_retry:
enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
for retry_attempt in xrange(1, self._options.num_retries + 1):
if not tests_to_retry:
break
_log.info('')
_log.info('Retrying %s, attempt %d of %d...',
grammar.pluralize('unexpected failure', len(tests_to_retry)),
retry_attempt, self._options.num_retries)
retry_results = self._run_tests(tests_to_retry,
tests_to_skip=set(),
repeat_each=1,
iterations=1,
num_workers=num_workers,
retry_attempt=retry_attempt)
all_retry_results.append(retry_results)
tests_to_retry = self._tests_to_retry(retry_results)
if enabled_pixel_tests_in_retry:
self._options.pixel_tests = False
finally:
self._stop_servers()
self._clean_up_run()
# Some crash logs can take a long time to be written out so look
# for new logs after the test run finishes.
self._printer.write_update("looking for new crash logs")
self._look_for_new_crash_logs(initial_results, start_time)
for retry_attempt_results in all_retry_results:
self._look_for_new_crash_logs(retry_attempt_results, start_time)
_log.debug("summarizing results")
summarized_full_results = test_run_results.summarize_results(
self._port, self._expectations, initial_results, all_retry_results,
enabled_pixel_tests_in_retry)
summarized_failing_results = test_run_results.summarize_results(
self._port, self._expectations, initial_results, all_retry_results,
enabled_pixel_tests_in_retry, only_include_failing=True)
exit_code = summarized_failing_results['num_regressions']
if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
_log.warning('num regressions (%d) exceeds max exit status (%d)',
exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS)
exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS
if not self._options.dry_run:
self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests)
if self._options.write_full_results_to:
self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"),
self._options.write_full_results_to)
self._upload_json_files()
results_path = self._filesystem.join(self._results_directory, "results.html")
self._copy_results_html_file(results_path)
if initial_results.keyboard_interrupted:
exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
else:
if initial_results.interrupted:
exit_code = test_run_results.EARLY_EXIT_STATUS
if self._options.show_results and (
exit_code or (self._options.full_results_html and initial_results.total_failures)):
self._port.show_results_html_file(results_path)
self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)
return test_run_results.RunDetails(
exit_code, summarized_full_results, summarized_failing_results,
initial_results, all_retry_results, enabled_pixel_tests_in_retry)
def _collect_tests(self, args):
return self._finder.find_tests(args, test_list=self._options.test_list,
fastest_percentile=self._options.fastest)
def _is_http_test(self, test):
return (
test.startswith(self.HTTP_SUBDIR) or
self._is_websocket_test(test) or
self._port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR in test
)
def _is_inspector_test(self, test):
return self.INSPECTOR_SUBDIR in test
def _is_websocket_test(self, test):
if self._port.should_use_wptserve(test):
return False
return self.WEBSOCKET_SUBDIR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test
def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
tests_to_run = [test for test in test_names if test not in tests_to_skip]
if not tests_to_run:
return tests_to_run, tests_to_skip
# Create a sorted list of test files so the subset chunk,
# if used, contains alphabetically consecutive tests.
if self._options.order == 'natural':
tests_to_run.sort(key=self._port.test_key)
elif self._options.order == 'random':
random.shuffle(tests_to_run)
elif self._options.order == 'random-seeded':
rnd = random.Random()
rnd.seed(4) # http://xkcd.com/221/
rnd.shuffle(tests_to_run)
tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
tests_to_skip.update(tests_in_other_chunks)
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file):
return TestInput(test_file,
self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
self._test_requires_lock(test_file),
should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file)))
def _test_requires_lock(self, test_file):
"""Return True if the test needs to be locked when running multiple
instances of this test runner.
Perf tests are locked because heavy load caused by running other
tests in parallel might cause some of them to time out.
"""
return self._is_http_test(test_file) or self._is_perf_test(test_file)
def _test_is_expected_missing(self, test_file):
expectations = self._expectations.model().get_expectations(test_file)
return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations
def _test_is_slow(self, test_file):
return test_expectations.SLOW in self._expectations.model().get_expectations(test_file)
def _needs_servers(self, test_names):
return any(self._test_requires_lock(test_name) for test_name in test_names)
def _rename_results_folder(self):
try:
timestamp = time.strftime(
"%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html"))))
except (IOError, OSError) as e:
# It might be possible that results.html was not generated in the previous run, because the test
# run was interrupted even before testing started. In those cases, don't archive the folder.
# Simply overwrite the current folder contents with new results.
import errno
if e.errno == errno.EEXIST or e.errno == errno.ENOENT:
self._printer.write_update("No results.html file found in previous run, skipping it.")
return None
archived_name = ''.join((self._filesystem.basename(self._results_directory), "_", timestamp))
archived_path = self._filesystem.join(self._filesystem.dirname(self._results_directory), archived_name)
self._filesystem.move(self._results_directory, archived_path)
def _delete_dirs(self, dir_list):
for dir in dir_list:
self._filesystem.rmtree(dir)
def _limit_archived_results_count(self):
results_directory_path = self._filesystem.dirname(self._results_directory)
file_list = self._filesystem.listdir(results_directory_path)
results_directories = []
for dir in file_list:
file_path = self._filesystem.join(results_directory_path, dir)
if self._filesystem.isdir(file_path) and self._results_directory in file_path:
results_directories.append(file_path)
results_directories.sort(key=lambda x: self._filesystem.mtime(x))
self._printer.write_update("Clobbering excess archived results in %s" % results_directory_path)
self._delete_dirs(results_directories[:-self.ARCHIVED_RESULTS_LIMIT])
def _set_up_run(self, test_names):
self._printer.write_update("Checking build ...")
if self._options.build:
exit_code = self._port.check_build(self._needs_servers(test_names), self._printer)
if exit_code:
_log.error("Build check failed")
return exit_code
# This must be started before we check the system dependencies,
# since the helper may do things to make the setup correct.
if self._options.pixel_tests:
self._printer.write_update("Starting pixel test helper ...")
self._port.start_helper()
# Check that the system dependencies (themes, fonts, ...) are correct.
if not self._options.nocheck_sys_deps:
self._printer.write_update("Checking system dependencies ...")
exit_code = self._port.check_sys_deps(self._needs_servers(test_names))
if exit_code:
self._port.stop_helper()
return exit_code
if self._options.clobber_old_results:
self._clobber_old_results()
elif self._filesystem.exists(self._results_directory):
self._limit_archived_results_count()
# Rename the existing results folder for archiving.
self._rename_results_folder()
# Create the output directory if it doesn't already exist.
self._port.host.filesystem.maybe_make_directory(self._results_directory)
self._port.setup_test_run()
return test_run_results.OK_EXIT_STATUS
def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations,
num_workers, retry_attempt=0):
test_inputs = []
for _ in xrange(iterations):
for test in tests_to_run:
for _ in xrange(repeat_each):
test_inputs.append(self._test_input_for_file(test))
return self._runner.run_tests(self._expectations, test_inputs,
tests_to_skip, num_workers, retry_attempt)
def _start_servers(self, tests_to_run):
if self._port.is_wptserve_enabled() and any(self._port.is_wptserve_test(test) for test in tests_to_run):
self._printer.write_update('Starting WPTServe ...')
self._port.start_wptserve()
self._wptserve_started = True
if self._port.requires_http_server() or any((self._is_http_test(test) or self._is_inspector_test(test))
for test in tests_to_run):
self._printer.write_update('Starting HTTP server ...')
self._port.start_http_server(additional_dirs={}, number_of_drivers=self._options.max_locked_shards)
self._http_server_started = True
if any(self._is_websocket_test(test) for test in tests_to_run):
self._printer.write_update('Starting WebSocket server ...')
self._port.start_websocket_server()
self._websockets_server_started = True
def _stop_servers(self):
if self._wptserve_started:
self._printer.write_update('Stopping WPTServe ...')
self._wptserve_started = False
self._port.stop_wptserve()
if self._http_server_started:
self._printer.write_update('Stopping HTTP server ...')
self._http_server_started = False
self._port.stop_http_server()
if self._websockets_server_started:
self._printer.write_update('Stopping WebSocket server ...')
self._websockets_server_started = False
self._port.stop_websocket_server()
def _clean_up_run(self):
_log.debug("Flushing stdout")
sys.stdout.flush()
_log.debug("Flushing stderr")
sys.stderr.flush()
_log.debug("Stopping helper")
self._port.stop_helper()
_log.debug("Cleaning up port")
self._port.clean_up_test_run()
def _force_pixel_tests_if_needed(self):
if self._options.pixel_tests:
return False
_log.debug("Restarting helper")
self._port.stop_helper()
self._options.pixel_tests = True
self._port.start_helper()
return True
def _look_for_new_crash_logs(self, run_results, start_time):
"""Looks for and writes new crash logs, at the end of the test run.
Since crash logs can take a long time to be written out if the system is
under stress, do a second pass at the end of the test run.
Args:
run_results: The results of the test run.
start_time: Time the tests started at. We're looking for crash
logs after that time.
"""
crashed_processes = []
for test, result in run_results.unexpected_results_by_name.iteritems():
if result.type != test_expectations.CRASH:
continue
for failure in result.failures:
if not isinstance(failure, test_failures.FailureCrash):
continue
if failure.has_log:
continue
crashed_processes.append([test, failure.process_name, failure.pid])
sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
if sample_files:
for test, sample_file in sample_files.iteritems():
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.copy_sample_file(sample_file)
crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
if crash_logs:
for test, crash_log in crash_logs.iteritems():
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.write_crash_log(crash_log)
def _clobber_old_results(self):
dir_above_results_path = self._filesystem.dirname(self._results_directory)
self._printer.write_update("Clobbering old results in %s" % dir_above_results_path)
if not self._filesystem.exists(dir_above_results_path):
return
file_list = self._filesystem.listdir(dir_above_results_path)
results_directories = []
for dir in file_list:
file_path = self._filesystem.join(dir_above_results_path, dir)
if self._filesystem.isdir(file_path) and self._results_directory in file_path:
results_directories.append(file_path)
self._delete_dirs(results_directories)
# Port specific clean-up.
self._port.clobber_old_port_specific_results()
def _tests_to_retry(self, run_results):
# TODO(ojan): This should also check that result.type != test_expectations.MISSING since retrying missing expectations is silly.
# But that's a bit tricky since we only consider the last retry attempt for the count of unexpected regressions.
return [result.test_name for result in run_results.unexpected_results_by_name.values(
) if result.type != test_expectations.PASS]
def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results, running_all_tests):
_log.debug("Writing JSON files in %s.", self._results_directory)
# FIXME: Upload stats.json to the server and delete times_ms.
times_trie = json_results_generator.test_timings_trie(initial_results.results_by_name.values())
times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
json_results_generator.write_json(self._filesystem, times_trie, times_json_path)
# Save out the times data so we can use it for --fastest in the future.
if running_all_tests:
bot_test_times_path = self._port.bot_test_times_path()
self._filesystem.maybe_make_directory(self._filesystem.dirname(bot_test_times_path))
json_results_generator.write_json(self._filesystem, times_trie, bot_test_times_path)
stats_trie = self._stats_trie(initial_results)
stats_path = self._filesystem.join(self._results_directory, "stats.json")
self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path)
full_results_path = self._filesystem.join(self._results_directory, "failing_results.json")
# We write failing_results.json out as jsonp because we need to load it
# from a file url for results.html and Chromium doesn't allow that.
json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS")
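# Hedged illustration of the jsonp form written above (the exact payload
# shape is an assumption): the file starts with the callback name, e.g.
#   ADD_RESULTS({"tests": {...}, "num_regressions": 2});
# which lets results.html pull it in via a <script> tag from a file:// URL.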
if self._options.json_test_results:
json_results_generator.write_json(self._filesystem, summarized_failing_results, self._options.json_test_results)
_log.debug("Finished writing JSON files.")
def _upload_json_files(self):
if not self._options.test_results_server:
return
if not self._options.master_name:
_log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
return
_log.debug("Uploading JSON files for builder: %s", self._options.builder_name)
attrs = [("builder", self._options.builder_name),
("testtype", self._options.step_name),
("master", self._options.master_name)]
files = [(file, self._filesystem.join(self._results_directory, file))
for file in ["failing_results.json", "full_results.json", "times_ms.json"]]
url = "http://%s/testfile/upload" % self._options.test_results_server
# Set uploading timeout in case appengine server is having problems.
# 120 seconds are more than enough to upload test results.
uploader = FileUploader(url, 120)
try:
response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
if response:
if response.code == 200:
_log.debug("JSON uploaded.")
else:
_log.debug("JSON upload failed, %d: '%s'", response.code, response.read())
else:
_log.error("JSON upload failed; no response returned")
except Exception as err:
_log.error("Upload failed: %s", err)
def _copy_results_html_file(self, destination_path):
base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
results_file = self._filesystem.join(base_dir, 'results.html')
# Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
# so make sure it exists before we try to copy it.
if self._filesystem.exists(results_file):
self._filesystem.copyfile(results_file, destination_path)
def _stats_trie(self, initial_results):
def _worker_number(worker_name):
return int(worker_name.split('/')[1]) if worker_name else -1
stats = {}
for result in initial_results.results_by_name.values():
if result.type != test_expectations.SKIP:
stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(
result.test_run_time * 1000), int(result.total_run_time * 1000))}
stats_trie = {}
for name, value in stats.iteritems():
json_results_generator.add_path_to_trie(name, value, stats_trie)
return stats_trie
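# Minimal driver sketch; the port/options/printer objects are assumed to be
# built by caller-side glue, and the variable names here are placeholders:
#
#   manager = Manager(port, options, printer)
#   details = manager.run(args)
#   sys.exit(details.exit_code)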
|
{
"content_hash": "b689190dada977418045b6208aaec632",
"timestamp": "",
"source": "github",
"line_count": 533,
"max_line_length": 171,
"avg_line_length": 48.332082551594745,
"alnum_prop": 0.6309149489538449,
"repo_name": "danakj/chromium",
"id": "67ca8bce188b6127c091266b79da0dc5c264aa6c",
"size": "27375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse
from rest_framework import status
from utils.testing_helpers import AuthenticatedAPITestCase
class FolderSelfTest(AuthenticatedAPITestCase):
def testSuccess(self):
# Create
response = self.client.post(reverse('folder-self-list'), {
'name': 'Test Folder'
}, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
folder = response.data['id']
detail_url = reverse('folder-self-detail', args=(folder,))
# Add link to folder
response = self.client.post(reverse('link-self-list'), {
'url': 'https://learn.adafruit.com/adafruits-raspberry-pi-lesson-4-gpio-setup/configuring-i2c',
'note': 'Raspberry Pi GPIO setup tutorial',
'folder': folder
}, format='json')
# List
response = self.client.get(reverse('folder-self-list'), format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
# Detail
response = self.client.get(detail_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['name'], 'Test Folder')
self.assertEqual(len(response.data['links']), 1)
# Update name
response = self.client.patch(detail_url, {
'name': 'Something Else'
}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['name'], 'Something Else')
# Update is_public
response = self.client.patch(detail_url, {
'is_public': False
}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['is_public'], False)
# Update description
response = self.client.patch(detail_url, {
'description': 'This is the description'
}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['description'], 'This is the description')
# Delete
response = self.client.delete(detail_url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def testMissingName(self):
response = self.client.post(reverse('folder-self-list'), {
'description': 'This is the description'
}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def testDuplicateName(self):
self.client.post(reverse('folder-self-list'), {
'name': 'Test Folder',
'description': 'This is the description'
}, format='json')
response = self.client.post(reverse('folder-self-list'), {
'name': 'Test Folder'
}, format='json')
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
|
{
"content_hash": "23812692ac537b9310dd77dd47ba9dbc",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 107,
"avg_line_length": 38.37179487179487,
"alnum_prop": 0.6271299699298363,
"repo_name": "projectweekend/Links-API",
"id": "dfdfa80be57669d3fdb2e1cefc0ce284544bf6ed",
"size": "2993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "links/folder/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "146296"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/armor/composite/shared_armor_composite_boots.iff"
result.attribute_template_id = 0
result.stfName("wearables_name","armor_composite_boots")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "6fbd66921d536dc5ef6c61633638af6d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 95,
"avg_line_length": 26.23076923076923,
"alnum_prop": 0.7214076246334311,
"repo_name": "obi-two/Rebelion",
"id": "9b4e4bb17b61edb5e705be1d1ff9023c42cca6eb",
"size": "486",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/wearables/armor/composite/shared_armor_composite_boots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
from datetime import time
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import timezones
from pandas import (
DataFrame,
date_range,
)
import pandas._testing as tm
class TestAtTime:
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_localized_at_time(self, tzstr, frame_or_series):
tz = timezones.maybe_get_tz(tzstr)
rng = date_range("4/16/2012", "5/1/2012", freq="H")
ts = frame_or_series(np.random.randn(len(rng)), index=rng)
ts_local = ts.tz_localize(tzstr)
result = ts_local.at_time(time(10, 0))
expected = ts.at_time(time(10, 0)).tz_localize(tzstr)
tm.assert_equal(result, expected)
assert timezones.tz_compare(result.index.tz, tz)
def test_at_time(self, frame_or_series):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
if frame_or_series is not DataFrame:
ts = ts[0]
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time("9:30")
expected = ts.at_time(time(9, 30))
tm.assert_equal(result, expected)
def test_at_time_midnight(self, frame_or_series):
# midnight, everything
rng = date_range("1/1/2000", "1/31/2000")
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
if frame_or_series is not DataFrame:
ts = ts[0]
result = ts.at_time(time(0, 0))
tm.assert_equal(result, ts)
def test_at_time_nonexistent(self, frame_or_series):
# time doesn't exist
rng = date_range("1/1/2012", freq="23Min", periods=384)
ts = DataFrame(np.random.randn(len(rng)), rng)
if frame_or_series is not DataFrame:
ts = ts[0]
rs = ts.at_time("16:00")
assert len(rs) == 0
@pytest.mark.parametrize(
"hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)]
)
def test_at_time_errors(self, hour):
# GH#24043
dti = date_range("2018", periods=3, freq="H")
df = DataFrame(list(range(len(dti))), index=dti)
if getattr(hour, "tzinfo", None) is None:
result = df.at_time(hour)
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(ValueError, match="Index must be timezone"):
df.at_time(hour)
def test_at_time_tz(self):
# GH#24043
dti = date_range("2018", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(list(range(len(dti))), index=dti)
result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern")))
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
def test_at_time_raises(self, frame_or_series):
# GH#20725
obj = DataFrame([[1, 2, 3], [4, 5, 6]])
if frame_or_series is not DataFrame:
obj = obj[0]
msg = "Index must be DatetimeIndex"
with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
obj.at_time("00:00")
@pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
def test_at_time_axis(self, axis):
# issue 8839
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
ts.index, ts.columns = rng, rng
indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]
if axis in ["index", 0]:
expected = ts.loc[indices, :]
elif axis in ["columns", 1]:
expected = ts.loc[:, indices]
result = ts.at_time("9:30", axis=axis)
# Without clearing freq, result has freq 1440T and expected 5T
result.index = result.index._with_freq(None)
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(result, expected)
def test_at_time_datetimeindex(self):
index = date_range("2012-01-01", "2012-01-05", freq="30min")
df = DataFrame(np.random.randn(len(index), 5), index=index)
akey = time(12, 0, 0)
ainds = [24, 72, 120, 168]
result = df.at_time(akey)
expected = df.loc[akey]
expected2 = df.iloc[ainds]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result, expected2)
assert len(result) == 4
|
{
"content_hash": "960e8859ddba06b44f997fd7eb52f199",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 81,
"avg_line_length": 35.578125,
"alnum_prop": 0.5781730346947739,
"repo_name": "gfyoung/pandas",
"id": "2d05176d20f5f76fb0414fbd860524e81648ad98",
"size": "4554",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pandas/tests/frame/methods/test_at_time.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4912"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14336547"
},
{
"name": "Shell",
"bytes": "29174"
},
{
"name": "Smarty",
"bytes": "2069"
}
],
"symlink_target": ""
}
|
import operator
from xadmin import widgets
from xadmin.util import get_fields_from_path, lookup_needs_distinct
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured, ValidationError
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.sql.query import LOOKUP_SEP, QUERY_TERMS
from django.template import loader
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from xadmin.filters import manager as filter_manager, FILTER_PREFIX, SEARCH_VAR, DateFieldListFilter, RelatedFieldSearchFilter
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ListAdminView
class IncorrectLookupParameters(Exception):
pass
class FilterPlugin(BaseAdminPlugin):
list_filter = ()
search_fields = ()
free_query_filter = True
def lookup_allowed(self, lookup, value):
model = self.model
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for l in model._meta.related_fkey_lookups:
for k, v in widgets.url_params_from_lookup_dict(l).items():
if k == lookup and v == value:
return True
parts = lookup.split(LOOKUP_SEP)
# Last term in lookup is a query term (__exact, __startswith etc)
# This term can be ignored.
if len(parts) > 1 and parts[-1] in QUERY_TERMS:
parts.pop()
# Special case -- foo__id__exact and foo__id queries are implied
# if foo has been specifically included in the lookup list; so
# drop __id if it is the last part. However, first we need to find
# the pk attribute name.
rel_name = None
for part in parts[:-1]:
try:
field = model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on non-existent fields are ok, since they're ignored
# later.
return True
if field.is_relation:
model = field.related_model
rel_name = field.rel.get_related_field().name
elif isinstance(field, ForeignObjectRel):
model = field.model
rel_name = model._meta.pk.name
else:
rel_name = None
if rel_name and len(parts) > 1 and parts[-1] == rel_name:
parts.pop()
if len(parts) == 1:
return True
clean_lookup = LOOKUP_SEP.join(parts)
return clean_lookup in self.list_filter
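# Worked example (field names are hypothetical): a request lookup of
# 'author__name__icontains' first drops '__icontains' as a QUERY_TERMS
# suffix, leaving 'author__name', which must appear in self.list_filter
# to be allowed. A lookup that reduces to a single part, e.g.
# 'author__id__exact' -> 'author' after the related-pk '__id' is dropped,
# is always allowed.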
def get_list_queryset(self, queryset):
lookup_params = dict([(smart_str(k)[len(FILTER_PREFIX):], v) for k, v in self.admin_view.params.items()
if smart_str(k).startswith(FILTER_PREFIX) and v != ''])
for p_key, p_val in lookup_params.iteritems():
if p_val == "False":
lookup_params[p_key] = False
use_distinct = False
# for clean filters
self.admin_view.has_query_param = bool(lookup_params)
self.admin_view.clean_query_url = self.admin_view.get_query_string(remove=
[k for k in self.request.GET.keys() if k.startswith(FILTER_PREFIX)])
# Normalize the types of keys
if not self.free_query_filter:
for key, value in lookup_params.items():
if not self.lookup_allowed(key, value):
raise SuspiciousOperation(
"Filtering by %s not allowed" % key)
self.filter_specs = []
if self.list_filter:
for list_filter in self.list_filter:
if callable(list_filter):
# This is simply a custom list filter class.
spec = list_filter(self.request, lookup_params,
self.model, self)
else:
field_path = None
field_parts = []
if isinstance(list_filter, (tuple, list)):
# This is a custom FieldListFilter class for a given field.
field, field_list_filter_class = list_filter
else:
# This is simply a field name, so use the default
# FieldListFilter class that has been registered for
# the type of the given field.
field, field_list_filter_class = list_filter, filter_manager.create
if not isinstance(field, models.Field):
field_path = field
field_parts = get_fields_from_path(
self.model, field_path)
field = field_parts[-1]
spec = field_list_filter_class(
field, self.request, lookup_params,
self.model, self.admin_view, field_path=field_path)
if len(field_parts)>1:
# Add related model name to title
spec.title = "%s %s"%(field_parts[-2].name,spec.title)
# Check if we need to use distinct()
use_distinct = (use_distinct or
lookup_needs_distinct(self.opts, field_path))
if spec and spec.has_output():
try:
new_qs = spec.do_filte(queryset)
except ValidationError, e:
new_qs = None
self.admin_view.message_user(_("<b>Filtering error:</b> %s") % e.messages[0], 'error')
if new_qs is not None:
queryset = new_qs
self.filter_specs.append(spec)
self.has_filters = bool(self.filter_specs)
self.admin_view.filter_specs = self.filter_specs
self.admin_view.used_filter_num = len(
filter(lambda f: f.is_used, self.filter_specs))
try:
for key, value in lookup_params.items():
use_distinct = (
use_distinct or lookup_needs_distinct(self.opts, key))
except FieldDoesNotExist, e:
raise IncorrectLookupParameters(e)
try:
queryset = queryset.filter(**lookup_params)
except (SuspiciousOperation, ImproperlyConfigured):
raise
except Exception, e:
raise IncorrectLookupParameters(e)
query = self.request.GET.get(SEARCH_VAR, '')
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
if self.search_fields and query:
orm_lookups = [construct_search(str(search_field))
for search_field in self.search_fields]
for bit in query.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(self.opts, search_spec):
use_distinct = True
break
self.admin_view.search_query = query
if use_distinct:
return queryset.distinct()
else:
return queryset
# Media
def get_media(self, media):
if bool(filter(lambda s: isinstance(s, DateFieldListFilter), self.filter_specs)):
media = media + self.vendor('datepicker.css', 'datepicker.js',
'xadmin.widget.datetime.js')
if bool(filter(lambda s: isinstance(s, RelatedFieldSearchFilter), self.filter_specs)):
media = media + self.vendor(
'select.js', 'select.css', 'xadmin.widget.select.js')
return media + self.vendor('xadmin.plugin.filters.js')
# Block Views
def block_nav_menu(self, context, nodes):
if self.has_filters:
nodes.append(loader.render_to_string('xadmin/blocks/model_list.nav_menu.filters.html', context_instance=context))
def block_nav_form(self, context, nodes):
if self.search_fields:
nodes.append(
loader.render_to_string(
'xadmin/blocks/model_list.nav_form.search_form.html',
{'search_var': SEARCH_VAR,
'remove_search_url': self.admin_view.get_query_string(remove=[SEARCH_VAR]),
'search_form_params': self.admin_view.get_form_params(remove=[SEARCH_VAR])},
context_instance=context))
site.register_plugin(FilterPlugin, ListAdminView)
|
{
"content_hash": "5cfb1bbea029b21d30a0eb73d40dae96",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 143,
"avg_line_length": 43.598130841121495,
"alnum_prop": 0.5534833869239014,
"repo_name": "Mtax/xadmin-khb",
"id": "adfa70da9cadfc02b8741216ee0cd17f48b14219",
"size": "9330",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "xadmin/plugins/filters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "23733"
},
{
"name": "HTML",
"bytes": "95746"
},
{
"name": "JavaScript",
"bytes": "66284"
},
{
"name": "Python",
"bytes": "415078"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
}
|
""" Cisco_IOS_XE_mpls_ldp
This module contains a collection of YANG definitions
for the Cisco MPLS LDP configuration and operational data.
The configuration is held in the mpls\-ldp\-config container
which is broken into the following sections\:
1) global\-cfg contains configuration applicable to the entire
LSR.
2) nbr\-table contains configuration for specific LDP neighbors
or peers.
3) passwords contains configuration regarding passwords, both
local and those to be used with specific neighbors.
4) label\-cfg contains the label allocation and advertisement
configuration and filters.
5) discovery contains the configuration for link\-hello and
targeted hello protocol parameters including
interface\-specific settings for transport.
6) graceful\-restart contains the configuration for the
graceful restart feature.
7) logging contains the configuration for ldp\-specific logs.
8) interfaces contains the configuration for each interface,
including any routing interactions specific to that
interface.
The operational state is held in the mpls\-ldp\-state container
which is broken into the following sections\:
1) oper\-summary contains the summarized global state.
2) forwarding\-summary contains the summarized forwarding
state.
3) bindings\-summary contains the summarized bindings state.
4) vrf provides the detailed state on a per VRF basis.
5) bindings\-advertise\-specs \- holds the advertisement
specification filters
6) discovery provides the LDP Discovery operational state.
7) forwarding provides summary information regarding LDP
forwarding setup and detailed information on the LDP
forwarding rewrites
8) bindings provides the detailed LDP Bindings of address to
label.
9) neighbors provides the detailed state for each LDP neighbor
and its sessions.
The vrf\-table, provides the detailed state on a per VRF basis.
If the router only supports LDP in a single VRF then this table
will have a single entry using the vrf\-name 'default'.
Otherwise this table will have one entry for every VRF where
LDP is enabled on the device.
Each vrf includes\:
A list of parameters used by the VRF
A capability table containing the capabilities exchanged with
each neighbor.
A table of backoff parameters used in this VRF.
The graceful restart information used between the local
device and the neighbors should any of them restart.
An AF\-table which holds all information for a given Address
Family. This is extensive and is described below.
The LDP ID used by the device for this vrf.
The AF\-table holds information for a given Address Family
such as\:
\- per\-interface state.
\- IGP synchronization data.
\- LDP bindings statistics.
\- LDP forwarding statistics.
Terms and Acronyms
FRR \- Fast Re\-Route
ICCP \- Inter\-Chassis Communication Protocol
LACP \- Link Aggregation Control Protocol
LDP \- Label Distribution Protocol
LER \- Label Edge Router
LFA \- Loop Free Alternative
LIB \- Label Information Base
LSR \- Label Switch Router
MPLS \- Multi\-Protocol Label Switching
PQ node \- A node which is a member of both the extended
P\-space and the Q\-space as defined in
draft\-ietf\-rtgwg\-rlfa\-node\-protection.
VRF \- Virtual Route Forwarding
Copyright (c) 2014, 2017 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class AdjStateEnum(Enum):
"""
AdjStateEnum
The current state of the session; all of the
states 0 to 5 are based on the state machine
for the LDP adjacency peer.
.. data:: nonex = 0
LDP adjacency state: nonexistent.
.. data:: unsol_op_pdg = 1
LDP session state: unsolicited open pending.
.. data:: deferred = 2
LDP session state: deferred.
.. data:: estab = 3
LDP session state: established
.. data:: lib_exp_wait = 4
LDP session state: LIB expansion wait.
.. data:: destroyed = 5
LDP session state: destroyed.
"""
nonex = 0
unsol_op_pdg = 1
deferred = 2
estab = 3
lib_exp_wait = 4
destroyed = 5
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['AdjStateEnum']
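# Usage sketch: these generated classes are ordinary Python enums, so
# session states round-trip by value, e.g.:
#   AdjStateEnum.estab.value   # -> 3
#   AdjStateEnum(3)            # -> AdjStateEnum.estab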
class AdvLabelTypeEnum(Enum):
"""
AdvLabelTypeEnum
This provides the configuration of the type of label to
advertise for matching prefixes and peers.
.. data:: use_lable = 1
Advertise the label for matching prefixes and peers.
.. data:: use_explicit = 2
Advertise explicit null for matching prefixes and peers.
.. data:: use_implicit = 3
Advertise implicit null for matching prefixes and peers.
.. data:: none = 4
Do not advertise labels for matching prefixes and peers.
"""
use_lable = 1
use_explicit = 2
use_implicit = 3
none = 4
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['AdvLabelTypeEnum']
class AfEnum(Enum):
"""
AfEnum
LDP Address Family
.. data:: ldp_af_none = 0
No Address Family
.. data:: ldp_af_ipv4 = 1
IPv4 AFI
.. data:: ldp_af_ipv6 = 2
IPv6 AFI
.. data:: ldp_af_ipv4_ipv6 = 3
Both IPv4/IPv6 AFIs
"""
ldp_af_none = 0
ldp_af_ipv4 = 1
ldp_af_ipv6 = 2
ldp_af_ipv4_ipv6 = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['AfEnum']
class AfIdEnum(Enum):
"""
AfIdEnum
LDP AF type
.. data:: ldp_af_id_none = 0
No Address Family
.. data:: ldp_af_id_ipv4 = 1
IPv4 AFI
.. data:: ldp_af_id_ipv6 = 2
IPv6 AFI
"""
ldp_af_id_none = 0
ldp_af_id_ipv4 = 1
ldp_af_id_ipv6 = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['AfIdEnum']
class DhcStateEnum(Enum):
"""
DhcStateEnum
This is the Directed Hello Control State Type.
.. data:: none = 0
There is no current Directed Hello Control State.
.. data:: dhc_active = 1
The Directed Hello is Active.
.. data:: dhc_passive = 2
The Directed Hello is Passive.
.. data:: dhc_active_passive = 3
The Directed Hello is both Active and Passive.
"""
none = 0
dhc_active = 1
dhc_passive = 2
dhc_active_passive = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['DhcStateEnum']
class IccpStateEnum(Enum):
"""
IccpStateEnum
This enum describes the ICCP state as defined by the
IETF in TBD.
.. data:: nonexistent = 1
This state is the starting point for the state machine.
It indicates that no ICCP connection exists and that
there's no LDP session established between the PEs.
.. data:: initialized = 2
This state indicates that an LDP session exists between
the PEs but the LDP ICCP Capability has not yet been
exchanged between them.
.. data:: capsent = 3
This state indicates that an LDP session exists between
the PEs and that the local PE has advertised the LDP ICCP
Capability to its peer.
.. data:: caprec = 4
This state indicates that an LDP session exists between
the PEs and that the local PE has both received and
advertised the LDP ICCP Capability from/to its peer.
.. data:: connecting = 5
This state indicates that the local PE has initiated an
ICCP connection to its peer, and is awaiting its
response.
.. data:: operational = 6
This state indicates that the ICCP connection is
operational.
"""
nonexistent = 1
initialized = 2
capsent = 3
caprec = 4
connecting = 5
operational = 6
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['IccpStateEnum']
class IgpSyncStateEnum(Enum):
"""
IgpSyncStateEnum
This is the IGP Synchronization State.
.. data:: isync_ready = 0
Achieved
.. data:: isync_not_ready = 1
Not achieved
.. data:: isync_deferred = 2
Deferred due to interface delay or global
restart delay
"""
isync_ready = 0
isync_not_ready = 1
isync_deferred = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['IgpSyncStateEnum']
class LocalLabelStateEnum(Enum):
"""
LocalLabelStateEnum
This is the MPLS LDP Local Label State Type.
.. data:: local_label_state_none = 1
None
.. data:: local_label_state_assigned = 2
Assigned
.. data:: local_label_state_withdrawn = 3
Withdrawn
"""
local_label_state_none = 1
local_label_state_assigned = 2
local_label_state_withdrawn = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['LocalLabelStateEnum']
class LoopDetectionTypeEnum(Enum):
"""
LoopDetectionTypeEnum
This specifies the type of loop detection either supported by
the LSR or enabled on the LSR.
.. data:: none = 1
Loop Detection is not enabled on this LSR.
.. data:: other = 2
Loop Detection is enabled but by a method
other than those defined.
.. data:: hop_count = 3
Loop Detection is supported by Hop Count only.
.. data:: path_vector = 4
Loop Detection is supported by Path Vector only.
.. data:: hop_count_and_path_vector = 5
Loop Detection is supported by both Hop Count
and Path Vector.
"""
none = 1
other = 2
hop_count = 3
path_vector = 4
hop_count_and_path_vector = 5
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['LoopDetectionTypeEnum']
class NbrBgpAdvtStateEnum(Enum):
"""
NbrBgpAdvtStateEnum
MPLS LDP Neighbor BGP Label Advertisement State
Type.
.. data:: not_applicable = 0
BGP Label Advertisement is not applicable.
.. data:: permit = 1
BGP Label Advertisement is permitted.
.. data:: deny = 2
BGP Label Advertisement is denied.
"""
not_applicable = 0
permit = 1
deny = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NbrBgpAdvtStateEnum']
class SessionStateEnum(Enum):
"""
SessionStateEnum
The current state of the session. All of the
states 1 to 5 are based on the state machine
for LDP session negotiation behavior.
.. data:: nonexistent = 1
LDP session state: nonexistent.
.. data:: initialized = 2
LDP session state: initialized.
.. data:: openrec = 3
LDP session state: openrec.
.. data:: opensent = 4
LDP session state: opensent.
.. data:: operational = 5
LDP session state: operational.
"""
nonexistent = 1
initialized = 2
openrec = 3
opensent = 4
operational = 5
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['SessionStateEnum']
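# Illustrative sketch (hypothetical helpers): labels are only exchanged once
# the session negotiation state machine reaches 'operational', so monitoring
# code typically distinguishes the transient states from the terminal one.
# SessionStateEnum is the only name here that comes from this module.
def session_is_operational(state):
    """Return True once LDP session negotiation has completed."""
    return state == SessionStateEnum.operational

def session_is_negotiating(state):
    """Return True for the transient negotiation states."""
    return state in (SessionStateEnum.initialized,
                     SessionStateEnum.openrec,
                     SessionStateEnum.opensent)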
class LabelTypeIdentity(object):
"""
Base type for LDP Label Type
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
pass
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['LabelTypeIdentity']['meta_info']
class RoutePathLblOwnerIdentity(object):
"""
Base Route path label owner type.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
pass
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['RoutePathLblOwnerIdentity']['meta_info']
class NsrPeerSyncStateIdentity(object):
"""
Base identity for LDP NSR Peer Synchronization State.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
pass
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrPeerSyncStateIdentity']['meta_info']
class RoutePathTypeIdentity(object):
"""
Base type for Route path type.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
pass
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['RoutePathTypeIdentity']['meta_info']
class NsrStatusIdentity(object):
"""
Base identity for Non\-Stop Routing State Type.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
pass
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrStatusIdentity']['meta_info']
class IgpSyncDownReasonIdentity(object):
"""
Base identity reason IGP Sync was not achieved.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
pass
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['IgpSyncDownReasonIdentity']['meta_info']
class IcpmTypeIdentity(object):
"""
Base identity from which ICPM types can be derived. As this is
an extensible protocol, new types are expected.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
pass
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['IcpmTypeIdentity']['meta_info']
class NsrPeerSyncErrIdentity(object):
"""
Base for MPLS LDP NSR peer synchronization error types.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
pass
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrPeerSyncErrIdentity']['meta_info']
class NsrSyncNackRsnIdentity(object):
"""
Base identity from which LDP Non\-Stop Routing peer LDP
synchronization nack reason identities are derived.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
pass
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnIdentity']['meta_info']
class DownNbrReasonIdentity(object):
"""
Base identity for the reason a neighbor is down.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
pass
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['DownNbrReasonIdentity']['meta_info']
class IccpTypeIdentity(object):
"""
Base identity from which ICCP types can be derived. As this is
an extensible protocol, new types are expected.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
pass
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['IccpTypeIdentity']['meta_info']
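# Illustrative usage sketch, assuming the classic ydk-py CRUDService and
# NetconfServiceProvider API that matches these bindings; the device address
# and credentials below are placeholders, and the function name is
# hypothetical. Reading the operational subtree and printing two summary
# counters might look like this:
def _example_read_mpls_ldp_state():
    from ydk.services import CRUDService
    from ydk.providers import NetconfServiceProvider
    provider = NetconfServiceProvider(address='192.0.2.1', port=830,
                                      username='admin', password='admin',
                                      protocol='ssh')
    crud = CRUDService()
    # MplsLdp is defined below; passing an empty instance as the read filter
    # retrieves the whole mpls-ldp tree, including mpls-ldp-state.
    ldp = crud.read(provider, MplsLdp())
    common = ldp.mpls_ldp_state.oper_summary.common
    print('LDP neighbors: %s' % common.number_of_neighbors)
    print('LDP interfaces: %s' % common.number_of_ldp_interfaces)
    provider.close()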
class MplsLdp(object):
"""
MPLS LDP configuration and operational data.
.. attribute:: mpls_ldp_config
MPLS LDP Configuration
**type**\: :py:class:`MplsLdpConfig <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig>`
.. attribute:: mpls_ldp_state
MPLS LDP operational data
**type**\: :py:class:`MplsLdpState <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.mpls_ldp_config = MplsLdp.MplsLdpConfig()
self.mpls_ldp_config.parent = self
self.mpls_ldp_state = MplsLdp.MplsLdpState()
self.mpls_ldp_state.parent = self
class MplsLdpState(object):
"""
MPLS LDP operational data.
.. attribute:: backoff_parameters
MPLS LDP Session Backoff Information
**type**\: :py:class:`BackoffParameters <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.BackoffParameters>`
.. attribute:: bindings
The detailed LDP Bindings
**type**\: :py:class:`Bindings <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Bindings>`
.. attribute:: bindings_summary
Aggregate counters for the MPLS LDP LIB
**type**\: :py:class:`BindingsSummary <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.BindingsSummary>`
.. attribute:: capabilities
LDP capability database information
**type**\: :py:class:`Capabilities <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Capabilities>`
.. attribute:: discovery
The LDP Discovery operational state
**type**\: :py:class:`Discovery <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Discovery>`
.. attribute:: forwarding
Summary information regarding LDP forwarding setup and detailed LDP Forwarding rewrites
**type**\: :py:class:`Forwarding <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding>`
.. attribute:: forwarding_summary
Summary information regarding LDP forwarding setup
**type**\: :py:class:`ForwardingSummary <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.ForwardingSummary>`
.. attribute:: graceful_restart
MPLS LDP Graceful Restart Information
**type**\: :py:class:`GracefulRestart <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.GracefulRestart>`
.. attribute:: icpm_summary_all
Summary info for LDP ICPM/ICCP
**type**\: :py:class:`IcpmSummaryAll <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.IcpmSummaryAll>`
.. attribute:: label_ranges
This container holds all the label ranges in use by this LDP instance
**type**\: :py:class:`LabelRanges <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.LabelRanges>`
.. attribute:: neighbors
The LDP Neighbors Information
**type**\: :py:class:`Neighbors <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors>`
.. attribute:: nsr_summary_all
This is the LDP NSR summary for the device
**type**\: :py:class:`NsrSummaryAll <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.NsrSummaryAll>`
.. attribute:: oper_summary
LDP operational data summary
**type**\: :py:class:`OperSummary <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.OperSummary>`
.. attribute:: parameters
MPLS LDP Global Parameters
**type**\: :py:class:`Parameters <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Parameters>`
.. attribute:: vrfs
MPLS LDP per\-VRF operational data
**type**\: :py:class:`Vrfs <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Vrfs>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.backoff_parameters = MplsLdp.MplsLdpState.BackoffParameters()
self.backoff_parameters.parent = self
self.bindings = MplsLdp.MplsLdpState.Bindings()
self.bindings.parent = self
self.bindings_summary = MplsLdp.MplsLdpState.BindingsSummary()
self.bindings_summary.parent = self
self.capabilities = MplsLdp.MplsLdpState.Capabilities()
self.capabilities.parent = self
self.discovery = MplsLdp.MplsLdpState.Discovery()
self.discovery.parent = self
self.forwarding = MplsLdp.MplsLdpState.Forwarding()
self.forwarding.parent = self
self.forwarding_summary = MplsLdp.MplsLdpState.ForwardingSummary()
self.forwarding_summary.parent = self
self.graceful_restart = MplsLdp.MplsLdpState.GracefulRestart()
self.graceful_restart.parent = self
self.icpm_summary_all = MplsLdp.MplsLdpState.IcpmSummaryAll()
self.icpm_summary_all.parent = self
self.label_ranges = MplsLdp.MplsLdpState.LabelRanges()
self.label_ranges.parent = self
self.neighbors = MplsLdp.MplsLdpState.Neighbors()
self.neighbors.parent = self
self.nsr_summary_all = MplsLdp.MplsLdpState.NsrSummaryAll()
self.nsr_summary_all.parent = self
self.oper_summary = MplsLdp.MplsLdpState.OperSummary()
self.oper_summary.parent = self
self.parameters = MplsLdp.MplsLdpState.Parameters()
self.parameters.parent = self
self.vrfs = MplsLdp.MplsLdpState.Vrfs()
self.vrfs.parent = self
class OperSummary(object):
"""
LDP operational data summary
.. attribute:: common
Common Summary information
**type**\: :py:class:`Common <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.OperSummary.Common>`
.. attribute:: no_of_ipv4_rib_tbl
Total number of ipv4 RIB tables
**type**\: int
**range:** 0..4294967295
.. attribute:: no_of_ipv4_rib_tbl_reg
Number of ipv4 RIB tables registered
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_autocfg_interfaces
Number of auto\-configured interfaces
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_fwd_ref_interfaces
Number of Forward Reference interfaces
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_interfaces
Number of known interfaces
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_vrf
Number of configured VRFs (including default)
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_vrf_oper
Number of configured operational VRFs (including default)
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.common = MplsLdp.MplsLdpState.OperSummary.Common()
self.common.parent = self
self.no_of_ipv4_rib_tbl = None
self.no_of_ipv4_rib_tbl_reg = None
self.number_of_autocfg_interfaces = None
self.number_of_fwd_ref_interfaces = None
self.number_of_interfaces = None
self.number_of_vrf = None
self.number_of_vrf_oper = None
class Common(object):
"""
Common Summary information
.. attribute:: address_families
Address Families enabled
**type**\: :py:class:`AfEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.AfEnum>`
.. attribute:: number_of_downstream_on_demand_neighbors
Number of Downstream\-On\-Demand neighbors
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_graceful_restart_neighbors
Number of Graceful Restart neighbors
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_ipv4_local_addresses
Number of IPv4 local addresses
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_ipv4_routes
Number of resolved IPv4 routes
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_ipv4ldp_interfaces
Number of LDP IPv4 configured interfaces
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_ldp_interfaces
Number of LDP configured interfaces
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_neighbors
Number of neighbors
**type**\: int
**range:** 0..4294967295
.. attribute:: numberof_ipv4_hello_adj
Number of LDP discovery IPv4 hello adjacencies
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.address_families = None
self.number_of_downstream_on_demand_neighbors = None
self.number_of_graceful_restart_neighbors = None
self.number_of_ipv4_local_addresses = None
self.number_of_ipv4_routes = None
self.number_of_ipv4ldp_interfaces = None
self.number_of_ldp_interfaces = None
self.number_of_neighbors = None
self.numberof_ipv4_hello_adj = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:oper-summary/Cisco-IOS-XE-mpls-ldp:common'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.address_families is not None:
return True
if self.number_of_downstream_on_demand_neighbors is not None:
return True
if self.number_of_graceful_restart_neighbors is not None:
return True
if self.number_of_ipv4_local_addresses is not None:
return True
if self.number_of_ipv4_routes is not None:
return True
if self.number_of_ipv4ldp_interfaces is not None:
return True
if self.number_of_ldp_interfaces is not None:
return True
if self.number_of_neighbors is not None:
return True
if self.numberof_ipv4_hello_adj is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.OperSummary.Common']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:oper-summary'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.common is not None and self.common._has_data():
return True
if self.no_of_ipv4_rib_tbl is not None:
return True
if self.no_of_ipv4_rib_tbl_reg is not None:
return True
if self.number_of_autocfg_interfaces is not None:
return True
if self.number_of_fwd_ref_interfaces is not None:
return True
if self.number_of_interfaces is not None:
return True
if self.number_of_vrf is not None:
return True
if self.number_of_vrf_oper is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.OperSummary']['meta_info']
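# Note (illustrative): after a read such as the _example_read_mpls_ldp_state
# sketch above the MplsLdp class, the per-AF counters live under
# mpls_ldp_state.oper_summary.common; leaves the device did not report stay
# None, which is exactly what the _has_data() methods test for.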
class ForwardingSummary(object):
"""
Summary information regarding LDP forwarding
setup
.. attribute:: intfs_fwd_count
MPLS forwarding enabled interface count
**type**\: int
**range:** 0..65535
.. attribute:: local_lbls
Local label allocated count
**type**\: int
**range:** 0..65535
.. attribute:: nhs
MPLS LDP forwarding rewrite next\-hop/path summary
**type**\: :py:class:`Nhs <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.ForwardingSummary.Nhs>`
.. attribute:: pfxs
MPLS LDP forwarding prefix rewrite summary
**type**\: :py:class:`Pfxs <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.ForwardingSummary.Pfxs>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.intfs_fwd_count = None
self.local_lbls = None
self.nhs = MplsLdp.MplsLdpState.ForwardingSummary.Nhs()
self.nhs.parent = self
self.pfxs = MplsLdp.MplsLdpState.ForwardingSummary.Pfxs()
self.pfxs.parent = self
class Pfxs(object):
"""
MPLS LDP forwarding prefix rewrite summary
.. attribute:: ecmp_pfxs
Count of prefixes with ECMP
**type**\: int
**range:** 0..65535
.. attribute:: labeled_pfxs_aggr
Labeled prefix count for all paths
**type**\: :py:class:`LabeledPfxsAggr <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.ForwardingSummary.Pfxs.LabeledPfxsAggr>`
.. attribute:: labeled_pfxs_backup
Labeled prefix count related to backup paths only
**type**\: :py:class:`LabeledPfxsBackup <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.ForwardingSummary.Pfxs.LabeledPfxsBackup>`
.. attribute:: labeled_pfxs_primary
Labeled prefix count related to primary paths only
**type**\: :py:class:`LabeledPfxsPrimary <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.ForwardingSummary.Pfxs.LabeledPfxsPrimary>`
.. attribute:: protected_pfxs
Count of FRR protected prefixes
**type**\: int
**range:** 0..65535
.. attribute:: total_pfxs
Total Prefix count
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.ecmp_pfxs = None
self.labeled_pfxs_aggr = MplsLdp.MplsLdpState.ForwardingSummary.Pfxs.LabeledPfxsAggr()
self.labeled_pfxs_aggr.parent = self
self.labeled_pfxs_backup = MplsLdp.MplsLdpState.ForwardingSummary.Pfxs.LabeledPfxsBackup()
self.labeled_pfxs_backup.parent = self
self.labeled_pfxs_primary = MplsLdp.MplsLdpState.ForwardingSummary.Pfxs.LabeledPfxsPrimary()
self.labeled_pfxs_primary.parent = self
self.protected_pfxs = None
self.total_pfxs = None
class LabeledPfxsAggr(object):
"""
Labeled prefix count for all paths
.. attribute:: labeled_pfxs
Count of labeled prefixes with 1 or more paths labeled
**type**\: int
**range:** 0..65535
.. attribute:: labeled_pfxs_partial
Count of labeled prefixes with some (but not ALL) paths labeled
**type**\: int
**range:** 0..65535
.. attribute:: unlabeled_pfxs
Count of labeled prefixes with ALL paths unlabeled
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.labeled_pfxs = None
self.labeled_pfxs_partial = None
self.unlabeled_pfxs = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:forwarding-summary/Cisco-IOS-XE-mpls-ldp:pfxs/Cisco-IOS-XE-mpls-ldp:labeled-pfxs-aggr'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.labeled_pfxs is not None:
return True
if self.labeled_pfxs_partial is not None:
return True
if self.unlabeled_pfxs is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.ForwardingSummary.Pfxs.LabeledPfxsAggr']['meta_info']
class LabeledPfxsPrimary(object):
"""
Labeled prefix count related to primary paths
only
.. attribute:: labeled_pfxs
Count of labeled prefixes with 1 or more paths labeled
**type**\: int
**range:** 0..65535
.. attribute:: labeled_pfxs_partial
Count of labeled prefixes with some (but not ALL) paths labeled
**type**\: int
**range:** 0..65535
.. attribute:: unlabeled_pfxs
Count of labeled prefixes with ALL paths unlabeled
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.labeled_pfxs = None
self.labeled_pfxs_partial = None
self.unlabeled_pfxs = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:forwarding-summary/Cisco-IOS-XE-mpls-ldp:pfxs/Cisco-IOS-XE-mpls-ldp:labeled-pfxs-primary'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.labeled_pfxs is not None:
return True
if self.labeled_pfxs_partial is not None:
return True
if self.unlabeled_pfxs is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.ForwardingSummary.Pfxs.LabeledPfxsPrimary']['meta_info']
class LabeledPfxsBackup(object):
"""
Labeled prefix count related to backup paths
only
.. attribute:: labeled_pfxs
Count of labeled prefixes with 1 or more paths labeled
**type**\: int
**range:** 0..65535
.. attribute:: labeled_pfxs_partial
Count of labeled prefixes with some (but not ALL) paths labeled
**type**\: int
**range:** 0..65535
.. attribute:: unlabeled_pfxs
Count of labeled prefixes with ALL paths unlabeled
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.labeled_pfxs = None
self.labeled_pfxs_partial = None
self.unlabeled_pfxs = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:forwarding-summary/Cisco-IOS-XE-mpls-ldp:pfxs/Cisco-IOS-XE-mpls-ldp:labeled-pfxs-backup'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.labeled_pfxs is not None:
return True
if self.labeled_pfxs_partial is not None:
return True
if self.unlabeled_pfxs is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.ForwardingSummary.Pfxs.LabeledPfxsBackup']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:forwarding-summary/Cisco-IOS-XE-mpls-ldp:pfxs'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.ecmp_pfxs is not None:
return True
if self.labeled_pfxs_aggr is not None and self.labeled_pfxs_aggr._has_data():
return True
if self.labeled_pfxs_backup is not None and self.labeled_pfxs_backup._has_data():
return True
if self.labeled_pfxs_primary is not None and self.labeled_pfxs_primary._has_data():
return True
if self.protected_pfxs is not None:
return True
if self.total_pfxs is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.ForwardingSummary.Pfxs']['meta_info']
class Nhs(object):
"""
MPLS LDP forwarding rewrite next\-hop/path summary
.. attribute:: backup_paths
Count of non\-primary backup paths
**type**\: int
**range:** 0..4294967295
.. attribute:: labeled_backup_paths
Count of labeled backup paths
**type**\: int
**range:** 0..4294967295
.. attribute:: labeled_paths
Count of all labeled paths
**type**\: int
**range:** 0..4294967295
.. attribute:: protected_paths
Count of FRR protected paths
**type**\: int
**range:** 0..4294967295
.. attribute:: remote_backup_paths
Count of non\-primary remote backup paths
**type**\: int
**range:** 0..4294967295
.. attribute:: total_paths
Total path count
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.backup_paths = None
self.labeled_backup_paths = None
self.labeled_paths = None
self.protected_paths = None
self.remote_backup_paths = None
self.total_paths = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:forwarding-summary/Cisco-IOS-XE-mpls-ldp:nhs'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.backup_paths is not None:
return True
if self.labeled_backup_paths is not None:
return True
if self.labeled_paths is not None:
return True
if self.protected_paths is not None:
return True
if self.remote_backup_paths is not None:
return True
if self.total_paths is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.ForwardingSummary.Nhs']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:forwarding-summary'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.intfs_fwd_count is not None:
return True
if self.local_lbls is not None:
return True
if self.nhs is not None and self.nhs._has_data():
return True
if self.pfxs is not None and self.pfxs._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.ForwardingSummary']['meta_info']
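# Note (illustrative): forwarding_summary splits its counters between pfxs
# (prefix rewrites, with aggregate/primary/backup labeled-prefix breakdowns)
# and nhs (per-path counts). With a previously read MplsLdp instance in a
# hypothetical variable `ldp`:
#   fs = ldp.mpls_ldp_state.forwarding_summary
#   labeled = fs.pfxs.labeled_pfxs_aggr.labeled_pfxs
#   paths = fs.nhs.labeled_paths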
class BindingsSummary(object):
"""
Aggregate counters for the MPLS LDP LIB.
.. attribute:: binding_local
Number of local bindings
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_local_explicit_null
Number of local explicit null bindings
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_local_implicit_null
Number of local implicit null bindings
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_local_no_route
Local bindings with no route
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_local_non_null
Number of local non\-null bindings
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_local_null
Number of local null bindings
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_local_oor
This is the number of local bindings that needed a label but hit the Out\-Of\-Resource condition
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_no_route
Bindings with no route
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_remote
Number of remote bindings
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_total
Total bindings
**type**\: int
**range:** 0..4294967295
.. attribute:: highest_allocated_label
Highest allocated label
**type**\: int
**range:** 0..4294967295
.. attribute:: lowest_allocated_label
Lowest allocated label
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.binding_local = None
self.binding_local_explicit_null = None
self.binding_local_implicit_null = None
self.binding_local_no_route = None
self.binding_local_non_null = None
self.binding_local_null = None
self.binding_local_oor = None
self.binding_no_route = None
self.binding_remote = None
self.binding_total = None
self.highest_allocated_label = None
self.lowest_allocated_label = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:bindings-summary'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.binding_local is not None:
return True
if self.binding_local_explicit_null is not None:
return True
if self.binding_local_implicit_null is not None:
return True
if self.binding_local_no_route is not None:
return True
if self.binding_local_non_null is not None:
return True
if self.binding_local_null is not None:
return True
if self.binding_local_oor is not None:
return True
if self.binding_no_route is not None:
return True
if self.binding_remote is not None:
return True
if self.binding_total is not None:
return True
if self.highest_allocated_label is not None:
return True
if self.lowest_allocated_label is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.BindingsSummary']['meta_info']
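# Note (illustrative): bindings_summary reports LIB totals directly, so a
# quick inspection of a previously read instance (hypothetical variable
# `ldp`) could be:
#   bs = ldp.mpls_ldp_state.bindings_summary
#   print('LIB: %s total, %s local, %s remote' %
#         (bs.binding_total, bs.binding_local, bs.binding_remote))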
class NsrSummaryAll(object):
"""
This is the LDP NSR summary for the device.
.. attribute:: nsr_sum_in_label_reqs_created
In label Request Records created
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_sum_in_label_reqs_freed
In label Request Records freed
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_sum_in_label_withdraw_created
In label Withdraw Records created
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_sum_in_label_withdraw_freed
In label Withdraw Records freed
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_sum_lcl_addr_withdraw_cleared
Local Address Withdraw cleared
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_sum_lcl_addr_withdraw_set
Local Address Withdraw set
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.nsr_sum_in_label_reqs_created = None
self.nsr_sum_in_label_reqs_freed = None
self.nsr_sum_in_label_withdraw_created = None
self.nsr_sum_in_label_withdraw_freed = None
self.nsr_sum_lcl_addr_withdraw_cleared = None
self.nsr_sum_lcl_addr_withdraw_set = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:nsr-summary-all'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.nsr_sum_in_label_reqs_created is not None:
return True
if self.nsr_sum_in_label_reqs_freed is not None:
return True
if self.nsr_sum_in_label_withdraw_created is not None:
return True
if self.nsr_sum_in_label_withdraw_freed is not None:
return True
if self.nsr_sum_lcl_addr_withdraw_cleared is not None:
return True
if self.nsr_sum_lcl_addr_withdraw_set is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.NsrSummaryAll']['meta_info']
class IcpmSummaryAll(object):
"""
Summary info for LDP ICPM/ICCP.
.. attribute:: iccp_rg_app_data_count
ICCP RG App Data count
**type**\: int
**range:** 0..4294967295
.. attribute:: iccp_rg_conn_count
ICCP RG Connect count
**type**\: int
**range:** 0..4294967295
.. attribute:: iccp_rg_disconn_count
ICCP RG Disconnect count
**type**\: int
**range:** 0..4294967295
.. attribute:: iccp_rg_notif_count
ICCP RG Notif count
**type**\: int
**range:** 0..4294967295
.. attribute:: icpm_rgid_table_info
This defines the ICPM RGID Table
**type**\: :py:class:`IcpmRgidTableInfo <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmRgidTableInfo>`
.. attribute:: icpm_session_table
This is a list of ICPM sessions
**type**\: :py:class:`IcpmSessionTable <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmSessionTable>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.iccp_rg_app_data_count = None
self.iccp_rg_conn_count = None
self.iccp_rg_disconn_count = None
self.iccp_rg_notif_count = None
self.icpm_rgid_table_info = MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmRgidTableInfo()
self.icpm_rgid_table_info.parent = self
self.icpm_session_table = MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmSessionTable()
self.icpm_session_table.parent = self
class IcpmRgidTableInfo(object):
"""
This defines the ICPM RGID Table
.. attribute:: red_group
This is the data for an individual ICPM Redundancy Group.
**type**\: list of :py:class:`RedGroup <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmRgidTableInfo.RedGroup>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.red_group = YList()
self.red_group.parent = self
self.red_group.name = 'red_group'
class RedGroup(object):
"""
This is the data for an individual ICPM Redundancy
Group.
.. attribute:: rg_id <key>
This is the ICPM RG identifier
**type**\: int
**range:** 0..4294967295
.. attribute:: icpm_protocols
This list contains all active icpm protocols
**type**\: list of :py:class:`IcpmProtocols <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmRgidTableInfo.RedGroup.IcpmProtocols>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.rg_id = None
self.icpm_protocols = YList()
self.icpm_protocols.parent = self
self.icpm_protocols.name = 'icpm_protocols'
class IcpmProtocols(object):
"""
This list contains all active icpm protocols.
.. attribute:: icpm_type <key>
ICPM Type
**type**\: :py:class:`IcpmTypeIdentity <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.IcpmTypeIdentity>`
.. attribute:: redun_groups
List of Redundancy Groups
**type**\: list of :py:class:`RedunGroups <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmRgidTableInfo.RedGroup.IcpmProtocols.RedunGroups>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.icpm_type = None
self.redun_groups = YList()
self.redun_groups.parent = self
self.redun_groups.name = 'redun_groups'
class RedunGroups(object):
"""
List of Redundancy Groups
.. attribute:: rg_id <key>
Redundancy Group Identifier
**type**\: int
**range:** 0..4294967295
.. attribute:: client_id
Client Identifier
**type**\: int
**range:** 0..4294967295
.. attribute:: iccp_apps
List of apps
**type**\: list of :py:class:`IccpApps <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmRgidTableInfo.RedGroup.IcpmProtocols.RedunGroups.IccpApps>`
.. attribute:: peer_id
LSR identifier
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: state
ICCP State
**type**\: str
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.rg_id = None
self.client_id = None
self.iccp_apps = YList()
self.iccp_apps.parent = self
self.iccp_apps.name = 'iccp_apps'
self.peer_id = None
self.state = None
class IccpApps(object):
"""
List of apps
.. attribute:: iccp_app <key>
ICCP App Type
**type**\: :py:class:`IccpTypeIdentity <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.IccpTypeIdentity>`
.. attribute:: app_state
App State
**type**\: :py:class:`IccpStateEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.IccpStateEnum>`
.. attribute:: ptcl_ver
ICCP App Protocol Version
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.iccp_app = None
self.app_state = None
self.ptcl_ver = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.iccp_app is None:
raise YPYModelError('Key property iccp_app is None')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:iccp-apps[Cisco-IOS-XE-mpls-ldp:iccp-app = ' + str(self.iccp_app) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.iccp_app is not None:
return True
if self.app_state is not None:
return True
if self.ptcl_ver is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmRgidTableInfo.RedGroup.IcpmProtocols.RedunGroups.IccpApps']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.rg_id is None:
raise YPYModelError('Key property rg_id is None')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:redun-groups[Cisco-IOS-XE-mpls-ldp:rg-id = ' + str(self.rg_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.rg_id is not None:
return True
if self.client_id is not None:
return True
if self.iccp_apps is not None:
for child_ref in self.iccp_apps:
if child_ref._has_data():
return True
if self.peer_id is not None:
return True
if self.state is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmRgidTableInfo.RedGroup.IcpmProtocols.RedunGroups']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.icpm_type is None:
raise YPYModelError('Key property icpm_type is None')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:icpm-protocols[Cisco-IOS-XE-mpls-ldp:icpm-type = ' + str(self.icpm_type) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.icpm_type is not None:
return True
if self.redun_groups is not None:
for child_ref in self.redun_groups:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmRgidTableInfo.RedGroup.IcpmProtocols']['meta_info']
@property
def _common_path(self):
if self.rg_id is None:
raise YPYModelError('Key property rg_id is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:icpm-summary-all/Cisco-IOS-XE-mpls-ldp:icpm-rgid-table-info/Cisco-IOS-XE-mpls-ldp:red-group[Cisco-IOS-XE-mpls-ldp:rg-id = ' + str(self.rg_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.rg_id is not None:
return True
if self.icpm_protocols is not None:
for child_ref in self.icpm_protocols:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmRgidTableInfo.RedGroup']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:icpm-summary-all/Cisco-IOS-XE-mpls-ldp:icpm-rgid-table-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.red_group is not None:
for child_ref in self.red_group:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmRgidTableInfo']['meta_info']
class IcpmSessionTable(object):
"""
This is a list of ICPM sessions.
.. attribute:: session_table
ICPM LDP Session Table
**type**\: list of :py:class:`SessionTable <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmSessionTable.SessionTable>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.session_table = YList()
self.session_table.parent = self
self.session_table.name = 'session_table'
class SessionTable(object):
"""
ICPM LDP Session Table
.. attribute:: session_id <key>
This is the ICPM session identifier
**type**\: int
**range:** 0..4294967295
.. attribute:: icpm_protocols
This list contains all active icpm protocols
**type**\: list of :py:class:`IcpmProtocols <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmSessionTable.SessionTable.IcpmProtocols>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.session_id = None
self.icpm_protocols = YList()
self.icpm_protocols.parent = self
self.icpm_protocols.name = 'icpm_protocols'
class IcpmProtocols(object):
"""
This list contains all active icpm protocols.
.. attribute:: icpm_type <key>
ICPM Type
**type**\: :py:class:`IcpmTypeIdentity <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.IcpmTypeIdentity>`
.. attribute:: redun_groups
List of Redundancy Groups
**type**\: list of :py:class:`RedunGroups <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmSessionTable.SessionTable.IcpmProtocols.RedunGroups>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.icpm_type = None
self.redun_groups = YList()
self.redun_groups.parent = self
self.redun_groups.name = 'redun_groups'
class RedunGroups(object):
"""
List of Redundancy Groups
.. attribute:: rg_id <key>
Redundancy Group Identifier
**type**\: int
**range:** 0..4294967295
.. attribute:: client_id
Client Identifier
**type**\: int
**range:** 0..4294967295
.. attribute:: iccp_apps
List of apps
**type**\: list of :py:class:`IccpApps <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmSessionTable.SessionTable.IcpmProtocols.RedunGroups.IccpApps>`
.. attribute:: peer_id
LSR identifier
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: state
ICCP State
**type**\: str
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.rg_id = None
self.client_id = None
self.iccp_apps = YList()
self.iccp_apps.parent = self
self.iccp_apps.name = 'iccp_apps'
self.peer_id = None
self.state = None
class IccpApps(object):
"""
List of apps
.. attribute:: iccp_app <key>
ICCP App Type
**type**\: :py:class:`IccpTypeIdentity <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.IccpTypeIdentity>`
.. attribute:: app_state
App State
**type**\: :py:class:`IccpStateEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.IccpStateEnum>`
.. attribute:: ptcl_ver
ICCP App Protocol Version
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.iccp_app = None
self.app_state = None
self.ptcl_ver = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.iccp_app is None:
raise YPYModelError('Key property iccp_app is None')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:iccp-apps[Cisco-IOS-XE-mpls-ldp:iccp-app = ' + str(self.iccp_app) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.iccp_app is not None:
return True
if self.app_state is not None:
return True
if self.ptcl_ver is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmSessionTable.SessionTable.IcpmProtocols.RedunGroups.IccpApps']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.rg_id is None:
raise YPYModelError('Key property rg_id is None')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:redun-groups[Cisco-IOS-XE-mpls-ldp:rg-id = ' + str(self.rg_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.rg_id is not None:
return True
if self.client_id is not None:
return True
if self.iccp_apps is not None:
for child_ref in self.iccp_apps:
if child_ref._has_data():
return True
if self.peer_id is not None:
return True
if self.state is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmSessionTable.SessionTable.IcpmProtocols.RedunGroups']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.icpm_type is None:
raise YPYModelError('Key property icpm_type is None')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:icpm-protocols[Cisco-IOS-XE-mpls-ldp:icpm-type = ' + str(self.icpm_type) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.icpm_type is not None:
return True
if self.redun_groups is not None:
for child_ref in self.redun_groups:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmSessionTable.SessionTable.IcpmProtocols']['meta_info']
@property
def _common_path(self):
if self.session_id is None:
raise YPYModelError('Key property session_id is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:icpm-summary-all/Cisco-IOS-XE-mpls-ldp:icpm-session-table/Cisco-IOS-XE-mpls-ldp:session-table[Cisco-IOS-XE-mpls-ldp:session-id = ' + str(self.session_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.session_id is not None:
return True
if self.icpm_protocols is not None:
for child_ref in self.icpm_protocols:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmSessionTable.SessionTable']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:icpm-summary-all/Cisco-IOS-XE-mpls-ldp:icpm-session-table'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.session_table is not None:
for child_ref in self.session_table:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.IcpmSummaryAll.IcpmSessionTable']['meta_info']
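# Note (illustrative): session_table is a keyed YList; after a read, its
# entries can be walked directly (hypothetical variable `ldp` holding a
# previously read MplsLdp instance):
#   icpm = ldp.mpls_ldp_state.icpm_summary_all
#   for entry in icpm.icpm_session_table.session_table:
#       print(entry.session_id)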
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:icpm-summary-all'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.iccp_rg_app_data_count is not None:
return True
if self.iccp_rg_conn_count is not None:
return True
if self.iccp_rg_disconn_count is not None:
return True
if self.iccp_rg_notif_count is not None:
return True
if self.icpm_rgid_table_info is not None and self.icpm_rgid_table_info._has_data():
return True
if self.icpm_session_table is not None and self.icpm_session_table._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.IcpmSummaryAll']['meta_info']
class Parameters(object):
"""
MPLS LDP Global Parameters
.. attribute:: address_family_parameter
Per AF parameters
**type**\: list of :py:class:`AddressFamilyParameter <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Parameters.AddressFamilyParameter>`
.. attribute:: af_binding_withdraw_delay
Delay (sec) in Binding Withdrawal for an Address Family
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: discovery_quick_start_disabled_on_interfaces
Discovery quick\-start disabled on some enabled interfaces
**type**\: bool
.. attribute:: dod_max_hop
Maximum number of hops for Downstream\-on\-Demand
**type**\: int
**range:** 0..4294967295
.. attribute:: feature
This entry describes an LDP feature available on the device. This does not indicate whether the feature is enabled, just the raw ability to support the feature. The features may include, but are not limited to\: 'Auto\-Configuration', 'Basic', 'ICPM', 'IP\-over\-MPLS', 'IGP\-Sync', 'LLAF', 'TCP\-MD5\-Rollover', 'TDP', and 'NSR'
**type**\: list of str
.. attribute:: global_md5_password_enabled
Global MD5 password enabled
**type**\: bool
.. attribute:: keepalive_interval
Keepalive interval in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: le_no_route_timeout
LIB entry no\-route timeout in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: loop_detection
An indication of whether this LSR has enabled loop detection. Since Loop Detection is determined during Session Initialization, an individual session may not be running with loop detection. This object simply indicates whether the LSR has Loop Detection support enabled and, if so, which types
**type**\: :py:class:`LoopDetectionTypeEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.LoopDetectionTypeEnum>`
.. attribute:: max_intf_attached
Maximum number of LDP enabled attached interfaces
**type**\: int
**range:** 0..4294967295
.. attribute:: max_intf_te
Maximum number of LDP enabled TE interfaces
**type**\: int
**range:** 0..4294967295
.. attribute:: max_peer
Maximum number of LDP peers
**type**\: int
**range:** 0..4294967295
.. attribute:: out_of_mem_state
This is a counter of the number of times LDP attempted to create a label or binding and failed due to a memory allocation failure
**type**\: int
**range:** 0..4294967295
.. attribute:: protocol_version
Protocol version
**type**\: int
**range:** 0..4294967295
.. attribute:: session_hold_time
Session hold time in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.address_family_parameter = YList()
self.address_family_parameter.parent = self
self.address_family_parameter.name = 'address_family_parameter'
self.af_binding_withdraw_delay = None
self.discovery_quick_start_disabled_on_interfaces = None
self.dod_max_hop = None
self.feature = YLeafList()
self.feature.parent = self
self.feature.name = 'feature'
self.global_md5_password_enabled = None
self.keepalive_interval = None
self.le_no_route_timeout = None
self.loop_detection = None
self.max_intf_attached = None
self.max_intf_te = None
self.max_peer = None
self.out_of_mem_state = None
self.protocol_version = None
self.session_hold_time = None
class AddressFamilyParameter(object):
"""
Per AF parameters
.. attribute:: address_family <key>
Address Family
**type**\: :py:class:`AfEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.AfEnum>`
.. attribute:: discovery_transport_address
This is the Discovery transport address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: is_accepting_targeted_hellos
Accepting targeted Hellos
**type**\: bool
.. attribute:: targeted_hello_filter
This contains the filter name for targeted hellos. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.address_family = None
self.discovery_transport_address = None
self.is_accepting_targeted_hellos = None
self.targeted_hello_filter = None
@property
def _common_path(self):
if self.address_family is None:
raise YPYModelError('Key property address_family is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:parameters/Cisco-IOS-XE-mpls-ldp:address-family-parameter[Cisco-IOS-XE-mpls-ldp:address-family = ' + str(self.address_family) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.address_family is not None:
return True
if self.discovery_transport_address is not None:
return True
if self.is_accepting_targeted_hellos is not None:
return True
if self.targeted_hello_filter is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Parameters.AddressFamilyParameter']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:parameters'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.address_family_parameter is not None:
for child_ref in self.address_family_parameter:
if child_ref._has_data():
return True
if self.af_binding_withdraw_delay is not None:
return True
if self.discovery_quick_start_disabled_on_interfaces is not None:
return True
if self.dod_max_hop is not None:
return True
if self.feature is not None:
for child in self.feature:
if child is not None:
return True
if self.global_md5_password_enabled is not None:
return True
if self.keepalive_interval is not None:
return True
if self.le_no_route_timeout is not None:
return True
if self.loop_detection is not None:
return True
if self.max_intf_attached is not None:
return True
if self.max_intf_te is not None:
return True
if self.max_peer is not None:
return True
if self.out_of_mem_state is not None:
return True
if self.protocol_version is not None:
return True
if self.session_hold_time is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Parameters']['meta_info']
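# Illustrative sketch (not part of the generated model): reading the global
# LDP parameters with ydk-py's CRUD service. The device address, port, and
# credentials below are placeholders; adjust them for a real device.
#
#     from ydk.services import CRUDService
#     from ydk.providers import NetconfServiceProvider
#     from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import MplsLdp
#
#     provider = NetconfServiceProvider(address='10.0.0.1', port=830,
#                                       username='admin', password='admin',
#                                       protocol='ssh')
#     crud = CRUDService()
#     ldp = crud.read(provider, MplsLdp())
#     print(ldp.mpls_ldp_state.parameters.session_hold_time)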
class Capabilities(object):
"""
LDP capability database information
.. attribute:: capability
Information on LDP capability
**type**\: list of :py:class:`Capability <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Capabilities.Capability>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.capability = YList()
self.capability.parent = self
self.capability.name = 'capability'
class Capability(object):
"""
Information on LDP capability
.. attribute:: cap_type <key>
Capability type (IANA assigned)
**type**\: int
**range:** 0..65535
.. attribute:: cap_des
Capability description
**type**\: str
**length:** 0..80
.. attribute:: capability_data
Capability data
**type**\: str
.. attribute:: capability_data_length
Capability data length
**type**\: int
**range:** 0..65535
.. attribute:: capability_owner
Capability owner
**type**\: str
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.cap_type = None
self.cap_des = None
self.capability_data = None
self.capability_data_length = None
self.capability_owner = None
@property
def _common_path(self):
if self.cap_type is None:
raise YPYModelError('Key property cap_type is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:capabilities/Cisco-IOS-XE-mpls-ldp:capability[Cisco-IOS-XE-mpls-ldp:cap-type = ' + str(self.cap_type) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.cap_type is not None:
return True
if self.cap_des is not None:
return True
if self.capability_data is not None:
return True
if self.capability_data_length is not None:
return True
if self.capability_owner is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Capabilities.Capability']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:capabilities'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.capability is not None:
for child_ref in self.capability:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Capabilities']['meta_info']
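# Illustrative sketch: each entry of the capability YList is keyed by
# cap_type (the IANA-assigned capability TLV type). Assumes `ldp` was read
# as in the sketch after the Parameters class above.
#
#     for cap in ldp.mpls_ldp_state.capabilities.capability:
#         print(cap.cap_type, cap.cap_des, cap.capability_owner)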
class BackoffParameters(object):
"""
MPLS LDP Session Backoff Information
.. attribute:: backoff_seconds
Current backoff seconds count
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: initial_seconds
Initial backoff value in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: maximum_seconds
Maximum backoff value in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: waiting_seconds
Current backoff waiting seconds count
**type**\: int
**range:** 0..4294967295
**units**\: seconds
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.backoff_seconds = None
self.initial_seconds = None
self.maximum_seconds = None
self.waiting_seconds = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:backoff-parameters'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.backoff_seconds is not None:
return True
if self.initial_seconds is not None:
return True
if self.maximum_seconds is not None:
return True
if self.waiting_seconds is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.BackoffParameters']['meta_info']
class GracefulRestart(object):
"""
MPLS LDP Graceful Restart Information
.. attribute:: forwarding_state_hold_timer_remaining_seconds
Forwarding state hold timer remaining time in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: graceful_restart_forwarding_state_hold_time
Graceful restart forward state hold time in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: graceful_restart_reconnect_timeout
Reconnect timeout value in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: is_forwarding_state_hold_timer_running
Is graceful restart forwarding state hold timer running
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: is_graceful_restart_configured
Is graceful restart configured
**type**\: bool
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.forwarding_state_hold_timer_remaining_seconds = None
self.graceful_restart_forwarding_state_hold_time = None
self.graceful_restart_reconnect_timeout = None
self.is_forwarding_state_hold_timer_running = None
self.is_graceful_restart_configured = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:graceful-restart'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.forwarding_state_hold_timer_remaining_seconds is not None:
return True
if self.graceful_restart_forwarding_state_hold_time is not None:
return True
if self.graceful_restart_reconnect_timeout is not None:
return True
if self.is_forwarding_state_hold_timer_running is not None:
return True
if self.is_graceful_restart_configured is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.GracefulRestart']['meta_info']
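# Illustrative sketch: leaves typed as ydk.types.Empty are presence flags;
# after a read they hold an Empty instance when present and None otherwise.
# Assumes `ldp` was read as in the sketch after the Parameters class.
#
#     gr = ldp.mpls_ldp_state.graceful_restart
#     if gr.is_forwarding_state_hold_timer_running is not None:
#         print('hold timer running, %s seconds remaining'
#               % gr.forwarding_state_hold_timer_remaining_seconds)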
class Vrfs(object):
"""
MPLS LDP per\-VRF operational data.
.. attribute:: vrf
MPLS LDP Operational data for a given VRF
**type**\: list of :py:class:`Vrf <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Vrfs.Vrf>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.vrf = YList()
self.vrf.parent = self
self.vrf.name = 'vrf'
class Vrf(object):
"""
MPLS LDP Operational data for a given VRF.
.. attribute:: vrf_name <key>
This contains the VRF Name, where 'default' is used for the default vrf
**type**\: str
.. attribute:: afs
Address Family specific operational data
**type**\: :py:class:`Afs <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Vrfs.Vrf.Afs>`
.. attribute:: vrf_summary
MPLS LDP per VRF summarized Information
**type**\: :py:class:`VrfSummary <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Vrfs.Vrf.VrfSummary>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.vrf_name = None
self.afs = MplsLdp.MplsLdpState.Vrfs.Vrf.Afs()
self.afs.parent = self
self.vrf_summary = MplsLdp.MplsLdpState.Vrfs.Vrf.VrfSummary()
self.vrf_summary.parent = self
class VrfSummary(object):
"""
MPLS LDP per VRF summarized Information
.. attribute:: address_families
Address Families enabled
**type**\: :py:class:`AfEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.AfEnum>`
.. attribute:: number_of_downstream_on_demand_neighbors
Number of Downstream\-On\-Demand neighbors
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_graceful_restart_neighbors
Number of Graceful Restart neighbors
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_ipv4_local_addresses
Number of IPv4 local addresses
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_ipv4_routes
Number of resolved IPv4 routes
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_ipv4ldp_interfaces
Number of LDP IPv4 configured interfaces
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_ldp_interfaces
Number of LDP configured interfaces
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_neighbors
Number of neighbors
**type**\: int
**range:** 0..4294967295
.. attribute:: numberof_ipv4_hello_adj
Number of LDP discovery IPv4 hello adjacencies
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.address_families = None
self.number_of_downstream_on_demand_neighbors = None
self.number_of_graceful_restart_neighbors = None
self.number_of_ipv4_local_addresses = None
self.number_of_ipv4_routes = None
self.number_of_ipv4ldp_interfaces = None
self.number_of_ldp_interfaces = None
self.number_of_neighbors = None
self.numberof_ipv4_hello_adj = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:vrf-summary'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.address_families is not None:
return True
if self.number_of_downstream_on_demand_neighbors is not None:
return True
if self.number_of_graceful_restart_neighbors is not None:
return True
if self.number_of_ipv4_local_addresses is not None:
return True
if self.number_of_ipv4_routes is not None:
return True
if self.number_of_ipv4ldp_interfaces is not None:
return True
if self.number_of_ldp_interfaces is not None:
return True
if self.number_of_neighbors is not None:
return True
if self.numberof_ipv4_hello_adj is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Vrfs.Vrf.VrfSummary']['meta_info']
class Afs(object):
"""
Address Family specific operational data
.. attribute:: af
MPLS LDP Operational data for this Address Family
**type**\: list of :py:class:`Af <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Vrfs.Vrf.Afs.Af>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.af = YList()
self.af.parent = self
self.af.name = 'af'
class Af(object):
"""
MPLS LDP Operational data for this Address Family.
.. attribute:: af_name <key>
Address Family name
**type**\: :py:class:`AfEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.AfEnum>`
.. attribute:: igp
LDP IGP Synchronization related information
**type**\: :py:class:`Igp <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Vrfs.Vrf.Afs.Af.Igp>`
.. attribute:: interface_summary
This container holds a summary of information across all interfaces in this AF.
**type**\: :py:class:`InterfaceSummary <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Vrfs.Vrf.Afs.Af.InterfaceSummary>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.af_name = None
self.igp = MplsLdp.MplsLdpState.Vrfs.Vrf.Afs.Af.Igp()
self.igp.parent = self
self.interface_summary = MplsLdp.MplsLdpState.Vrfs.Vrf.Afs.Af.InterfaceSummary()
self.interface_summary.parent = self
class InterfaceSummary(object):
"""
This container holds a summary of information
across all interfaces in this AF.
.. attribute:: auto_config
Auto\-configured interfaces
**type**\: int
**range:** 0..4294967295
.. attribute:: auto_config_disabled
Autoconfigure disabled
**type**\: int
**range:** 0..4294967295
.. attribute:: auto_config_forward_reference_interfaces
Auto\-configured forward references
**type**\: int
**range:** 0..4294967295
.. attribute:: configured_attached_interface
Number of attached interfaces configured in LDP
**type**\: int
**range:** 0..4294967295
.. attribute:: configured_te_interface
Number of TE tunnel interfaces configured in LDP
**type**\: int
**range:** 0..4294967295
.. attribute:: forward_references
Number of forward referenced interfaces
**type**\: int
**range:** 0..4294967295
.. attribute:: known_ip_interface_count
Number of known IP Interfaces
**type**\: int
**range:** 0..4294967295
.. attribute:: known_ip_interface_ldp_enabled
Number of known IP Interfaces with LDP Enabled
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.auto_config = None
self.auto_config_disabled = None
self.auto_config_forward_reference_interfaces = None
self.configured_attached_interface = None
self.configured_te_interface = None
self.forward_references = None
self.known_ip_interface_count = None
self.known_ip_interface_ldp_enabled = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:interface-summary'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.auto_config is not None:
return True
if self.auto_config_disabled is not None:
return True
if self.auto_config_forward_reference_interfaces is not None:
return True
if self.configured_attached_interface is not None:
return True
if self.configured_te_interface is not None:
return True
if self.forward_references is not None:
return True
if self.known_ip_interface_count is not None:
return True
if self.known_ip_interface_ldp_enabled is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Vrfs.Vrf.Afs.Af.InterfaceSummary']['meta_info']
class Igp(object):
"""
LDP IGP Synchronization related information
.. attribute:: sync
LDP\-IGP Synchronization related information for an interface
**type**\: list of :py:class:`Sync <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Vrfs.Vrf.Afs.Af.Igp.Sync>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.sync = YList()
self.sync.parent = self
self.sync.name = 'sync'
class Sync(object):
"""
LDP\-IGP Synchronization related information
for an interface
.. attribute:: interface <key>
This leaf contains the interface name for the IGP Synchronization information
**type**\: str
**refers to**\: :py:class:`name <ydk.models.ietf.ietf_interfaces.Interfaces.Interface>`
.. attribute:: delay_timer_remaining
Remaining timer (seconds) until expiry of sync delay timer
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: igp_sync_down_reason
Reason IGP Sync Not Achieved
**type**\: :py:class:`IgpSyncDownReasonIdentity <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.IgpSyncDownReasonIdentity>`
.. attribute:: igp_sync_state
IGP Sync state
**type**\: :py:class:`IgpSyncStateEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.IgpSyncStateEnum>`
.. attribute:: is_delay_timer_running
This is set when the sync delay timer is running
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: peers
MPLS LDP IGP Sync Interface Peer Information
**type**\: list of :py:class:`Peers <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Vrfs.Vrf.Afs.Af.Igp.Sync.Peers>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.interface = None
self.delay_timer_remaining = None
self.igp_sync_down_reason = None
self.igp_sync_state = None
self.is_delay_timer_running = None
self.peers = YList()
self.peers.parent = self
self.peers.name = 'peers'
class Peers(object):
"""
MPLS LDP IGP Sync Interface Peer Information
.. attribute:: is_chkpt_created
This is set if this peer was created due to check\-pointing
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: is_gr_enabled
Is this a GR\-enabled session
**type**\: bool
.. attribute:: peer_id
Peer Identifier
**type**\: str
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.is_chkpt_created = None
self.is_gr_enabled = None
self.peer_id = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:peers'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.is_chkpt_created is not None:
return True
if self.is_gr_enabled is not None:
return True
if self.peer_id is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Vrfs.Vrf.Afs.Af.Igp.Sync.Peers']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.interface is None:
raise YPYModelError('Key property interface is None')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:sync[Cisco-IOS-XE-mpls-ldp:interface = ' + str(self.interface) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.interface is not None:
return True
if self.delay_timer_remaining is not None:
return True
if self.igp_sync_down_reason is not None:
return True
if self.igp_sync_state is not None:
return True
if self.is_delay_timer_running is not None:
return True
if self.peers is not None:
for child_ref in self.peers:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Vrfs.Vrf.Afs.Af.Igp.Sync']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:igp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.sync is not None:
for child_ref in self.sync:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Vrfs.Vrf.Afs.Af.Igp']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.af_name is None:
raise YPYModelError('Key property af_name is None')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:af[Cisco-IOS-XE-mpls-ldp:af-name = ' + str(self.af_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.af_name is not None:
return True
if self.igp is not None and self.igp._has_data():
return True
if self.interface_summary is not None and self.interface_summary._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Vrfs.Vrf.Afs.Af']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:afs'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.af is not None:
for child_ref in self.af:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Vrfs.Vrf.Afs']['meta_info']
@property
def _common_path(self):
if self.vrf_name is None:
raise YPYModelError('Key property vrf_name is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:vrfs/Cisco-IOS-XE-mpls-ldp:vrf[Cisco-IOS-XE-mpls-ldp:vrf-name = ' + str(self.vrf_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.vrf_name is not None:
return True
if self.afs is not None and self.afs._has_data():
return True
if self.vrf_summary is not None and self.vrf_summary._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Vrfs.Vrf']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:vrfs'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.vrf is not None:
for child_ref in self.vrf:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Vrfs']['meta_info']
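# Illustrative sketch: VRF entries are keyed by vrf_name ('default' names
# the global table), so a plain iteration is a safe way to pick one out.
# Assumes `ldp` was read as in the sketch after the Parameters class.
#
#     for vrf in ldp.mpls_ldp_state.vrfs.vrf:
#         if vrf.vrf_name == 'default':
#             print(vrf.vrf_summary.number_of_neighbors,
#                   vrf.vrf_summary.number_of_ldp_interfaces)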
class Discovery(object):
"""
The LDP Discovery operational state
.. attribute:: discovery_stats
MPLS LDP Discovery Summary Information
**type**\: :py:class:`DiscoveryStats <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Discovery.DiscoveryStats>`
.. attribute:: link_hello_state
This container holds information for LDP Discovery using non\-targeted Hellos. These are interface\-based hellos which form one or more adjacencies for each interface and also form adjacencies on multiple interfaces. Link Hellos can therefore form multiple adjacencies with the same peer
**type**\: :py:class:`LinkHelloState <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Discovery.LinkHelloState>`
.. attribute:: targeted_hellos
The LDP Discovery Targeted Hello state
**type**\: :py:class:`TargetedHellos <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Discovery.TargetedHellos>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.discovery_stats = MplsLdp.MplsLdpState.Discovery.DiscoveryStats()
self.discovery_stats.parent = self
self.link_hello_state = MplsLdp.MplsLdpState.Discovery.LinkHelloState()
self.link_hello_state.parent = self
self.targeted_hellos = MplsLdp.MplsLdpState.Discovery.TargetedHellos()
self.targeted_hellos.parent = self
class DiscoveryStats(object):
"""
MPLS LDP Discovery Summary Information
.. attribute:: num_of_active_ldp_interfaces
Number of active LDP enabled interfaces
**type**\: int
**range:** 0..4294967295
.. attribute:: num_of_ldp_interfaces
Total Number of LDP configured interfaces
**type**\: int
**range:** 0..4294967295
.. attribute:: num_of_lnk_disc_recv
Number of link hello discoveries in recv state
**type**\: int
**range:** 0..4294967295
.. attribute:: num_of_lnk_disc_xmit
Number of link hello discoveries in xmit state
**type**\: int
**range:** 0..4294967295
.. attribute:: num_of_tgt_disc_recv
Number of targeted hello discoveries in recv state
**type**\: int
**range:** 0..4294967295
.. attribute:: num_of_tgt_disc_xmit
Number of targeted hello discoveries in xmit state
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.num_of_active_ldp_interfaces = None
self.num_of_ldp_interfaces = None
self.num_of_lnk_disc_recv = None
self.num_of_lnk_disc_xmit = None
self.num_of_tgt_disc_recv = None
self.num_of_tgt_disc_xmit = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:discovery/Cisco-IOS-XE-mpls-ldp:discovery-stats'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.num_of_active_ldp_interfaces is not None:
return True
if self.num_of_ldp_interfaces is not None:
return True
if self.num_of_lnk_disc_recv is not None:
return True
if self.num_of_lnk_disc_xmit is not None:
return True
if self.num_of_tgt_disc_recv is not None:
return True
if self.num_of_tgt_disc_xmit is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Discovery.DiscoveryStats']['meta_info']
class LinkHelloState(object):
"""
This container holds information for LDP Discovery
using non\-targeted Hellos. These are interface\-based
hellos which form one or more adjacencies for each
interface and also form adjacencies on multiple
interfaces. Link Hellos can therefore form multiple
adjacencies with the same peer.
.. attribute:: link_hellos
Each entry represents a single LDP Hello Adjacency. An LDP Session can have one or more Hello Adjacencies
**type**\: list of :py:class:`LinkHellos <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Discovery.LinkHelloState.LinkHellos>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.link_hellos = YList()
self.link_hellos.parent = self
self.link_hellos.name = 'link_hellos'
class LinkHellos(object):
"""
Each entry represents a single LDP Hello Adjacency.
An LDP Session can have one or more Hello
Adjacencies.
.. attribute:: interface <key>
The Discovery Interface
**type**\: str
**refers to**\: :py:class:`name <ydk.models.ietf.ietf_interfaces.Interfaces.Interface>`
.. attribute:: nbr_transport_addr <key>
This is the MPLS LDP Hello Neighbor transport address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: hello_interval
Hello interval in seconds. This is the value used to send hello messages
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: hold_time_remaining
This is the MPLS LDP Hello Discovery expiry time in seconds. If the value of this object is 65535, the hold time is infinite (i.e., wait forever). Otherwise, it is the time remaining for this Hello Adjacency to receive its next Hello Message; this interval changes when the 'next' Hello Message corresponding to this Hello Adjacency is received, unless the hold time is infinite
**type**\: int
**range:** 0..65535
**units**\: seconds
.. attribute:: local_src_addr
MPLS LDP Discovery Local source address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: local_transport_addr
MPLS LDP Discovery Local transport address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: nbr_hold_time
The Hello hold time which is negotiated between the Entity and the Peer. The entity associated with this Hello Adjacency issues a proposed Hello Hold Time value in the EntityHelloHoldTimer object. The peer also proposes a value and this object represents the negotiated value. A value of 0 means the default, which is 15 seconds for Link Hellos and 45 seconds for Targeted Hellos. A value of 65535 indicates an infinite hold time
**type**\: int
**range:** 0..65535
.. attribute:: nbr_ldp_id
Neighbor LDP Identifier
**type**\: str
.. attribute:: nbr_src_addr
This is the MPLS LDP Hello Neighbor source address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: next_hello
Next hello due time in milliseconds
**type**\: int
**range:** 0..4294967295
**units**\: milliseconds
.. attribute:: session_up
Set when the session is up for this adjacency
**type**\: :py:class:`Empty<ydk.types.Empty>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.interface = None
self.nbr_transport_addr = None
self.hello_interval = None
self.hold_time_remaining = None
self.local_src_addr = None
self.local_transport_addr = None
self.nbr_hold_time = None
self.nbr_ldp_id = None
self.nbr_src_addr = None
self.next_hello = None
self.session_up = None
@property
def _common_path(self):
if self.interface is None:
raise YPYModelError('Key property interface is None')
if self.nbr_transport_addr is None:
raise YPYModelError('Key property nbr_transport_addr is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:discovery/Cisco-IOS-XE-mpls-ldp:link-hello-state/Cisco-IOS-XE-mpls-ldp:link-hellos[Cisco-IOS-XE-mpls-ldp:interface = ' + str(self.interface) + '][Cisco-IOS-XE-mpls-ldp:nbr-transport-addr = ' + str(self.nbr_transport_addr) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.interface is not None:
return True
if self.nbr_transport_addr is not None:
return True
if self.hello_interval is not None:
return True
if self.hold_time_remaining is not None:
return True
if self.local_src_addr is not None:
return True
if self.local_transport_addr is not None:
return True
if self.nbr_hold_time is not None:
return True
if self.nbr_ldp_id is not None:
return True
if self.nbr_src_addr is not None:
return True
if self.next_hello is not None:
return True
if self.session_up is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Discovery.LinkHelloState.LinkHellos']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:discovery/Cisco-IOS-XE-mpls-ldp:link-hello-state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.link_hellos is not None:
for child_ref in self.link_hellos:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Discovery.LinkHelloState']['meta_info']
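# Illustrative sketch: hold_time_remaining uses 65535 to mean an infinite
# hold time, so treat that value specially when reporting adjacency expiry.
# Assumes `ldp` was read as in the sketch after the Parameters class.
#
#     for hello in ldp.mpls_ldp_state.discovery.link_hello_state.link_hellos:
#         left = ('infinite' if hello.hold_time_remaining == 65535
#                 else '%ss' % hello.hold_time_remaining)
#         print(hello.interface, hello.nbr_ldp_id, left)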
class TargetedHellos(object):
"""
The LDP Discovery Targeted Hello state.
.. attribute:: targeted_hello
The LDP targeted discovery information for a specific target. Targeted discovery creates a single adjacency between two addresses and not individual adjacencies across physical interfaces
**type**\: list of :py:class:`TargetedHello <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Discovery.TargetedHellos.TargetedHello>`
.. attribute:: targeted_hello_hold_time
Local Targeted hold time in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: targeted_hello_interval
Local Targeted Hello interval in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.targeted_hello = YList()
self.targeted_hello.parent = self
self.targeted_hello.name = 'targeted_hello'
self.targeted_hello_hold_time = None
self.targeted_hello_interval = None
class TargetedHello(object):
"""
The LDP targeted discovery information for a specific
target. Targeted discovery creates a single adjacency
between two addresses and not individual adjacencies
across physical interfaces.
.. attribute:: vrf_name <key>
This contains the VRF Name, where 'default' is used for the default vrf
**type**\: str
.. attribute:: target_address <key>
The target IP Address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: hold_time_remaining
This is the MPLS LDP Hello Discovery expiry time in seconds. If the value of this object is 65535, the hold time is infinite (i.e., wait forever). Otherwise, it is the time remaining for this Hello Adjacency to receive its next Hello Message; this interval changes when the 'next' Hello Message corresponding to this Hello Adjacency is received, unless the hold time is infinite
**type**\: int
**range:** 0..65535
**units**\: seconds
.. attribute:: local_address
Local IP Address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: nbr_hold_time
The Hello hold time which is negotiated between the Entity and the Peer. The entity associated with this Hello Adjacency issues a proposed Hello Hold Time value in the EntityHelloHoldTimer object. The peer also proposes a value and this object represents the negotiated value. A value of 0 means the default, which is 15 seconds for Link Hellos and 45 seconds for Targeted Hellos. A value of 65535 indicates an infinite hold time
**type**\: int
**range:** 0..65535
.. attribute:: neighbor_ldp_identifier
Neighbor LDP Identifier
**type**\: str
.. attribute:: next_hello
Next hello due time in milliseconds
**type**\: int
**range:** 0..4294967295
**units**\: milliseconds
.. attribute:: state
This is the MPLS LDP Targeted Hello state
**type**\: :py:class:`DhcStateEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.DhcStateEnum>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.vrf_name = None
self.target_address = None
self.hold_time_remaining = None
self.local_address = None
self.nbr_hold_time = None
self.neighbor_ldp_identifier = None
self.next_hello = None
self.state = None
@property
def _common_path(self):
if self.vrf_name is None:
raise YPYModelError('Key property vrf_name is None')
if self.target_address is None:
raise YPYModelError('Key property target_address is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:discovery/Cisco-IOS-XE-mpls-ldp:targeted-hellos/Cisco-IOS-XE-mpls-ldp:targeted-hello[Cisco-IOS-XE-mpls-ldp:vrf-name = ' + str(self.vrf_name) + '][Cisco-IOS-XE-mpls-ldp:target-address = ' + str(self.target_address) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.vrf_name is not None:
return True
if self.target_address is not None:
return True
if self.hold_time_remaining is not None:
return True
if self.local_address is not None:
return True
if self.nbr_hold_time is not None:
return True
if self.neighbor_ldp_identifier is not None:
return True
if self.next_hello is not None:
return True
if self.state is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Discovery.TargetedHellos.TargetedHello']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:discovery/Cisco-IOS-XE-mpls-ldp:targeted-hellos'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.targeted_hello is not None:
for child_ref in self.targeted_hello:
if child_ref._has_data():
return True
if self.targeted_hello_hold_time is not None:
return True
if self.targeted_hello_interval is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Discovery.TargetedHellos']['meta_info']
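# Illustrative sketch: the targeted-hellos container mixes scalar leaves
# (the local interval and hold time) with the per-target list, which is
# keyed by (vrf_name, target_address). Assumes `ldp` was read as above.
#
#     th = ldp.mpls_ldp_state.discovery.targeted_hellos
#     print(th.targeted_hello_interval, th.targeted_hello_hold_time)
#     for target in th.targeted_hello:
#         print(target.vrf_name, target.target_address, target.state)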
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:discovery'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.discovery_stats is not None and self.discovery_stats._has_data():
return True
if self.link_hello_state is not None and self.link_hello_state._has_data():
return True
if self.targeted_hellos is not None and self.targeted_hellos._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Discovery']['meta_info']
class Forwarding(object):
"""
Summary information regarding LDP forwarding
setup and detailed LDP Forwarding rewrites
.. attribute:: forwarding_detail
This leaf contains the individual LDP forwarding rewrite for a single prefix
**type**\: list of :py:class:`ForwardingDetail <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingDetail>`
.. attribute:: forwarding_vrf_summs
Summary of forwarding info for this VRF
**type**\: :py:class:`ForwardingVrfSumms <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.forwarding_detail = YList()
self.forwarding_detail.parent = self
self.forwarding_detail.name = 'forwarding_detail'
self.forwarding_vrf_summs = MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms()
self.forwarding_vrf_summs.parent = self
class ForwardingVrfSumms(object):
"""
Summary of forwarding info for this VRF.
.. attribute:: forwarding_vrf_summ
Summary of forwarding info for this VRF
**type**\: list of :py:class:`ForwardingVrfSumm <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.forwarding_vrf_summ = YList()
self.forwarding_vrf_summ.parent = self
self.forwarding_vrf_summ.name = 'forwarding_vrf_summ'
class ForwardingVrfSumm(object):
"""
Summary of forwarding info for this VRF.
.. attribute:: vrf_name <key>
This contains the VRF Name, where 'default' is used for the default vrf
**type**\: str
.. attribute:: intfs_fwd_count
MPLS forwarding enabled interface count
**type**\: int
**range:** 0..65535
.. attribute:: local_lbls
Local label allocated count
**type**\: int
**range:** 0..65535
.. attribute:: nhs
MPLS LDP forwarding rewrite next\-hop/path summary
**type**\: :py:class:`Nhs <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm.Nhs>`
.. attribute:: pfxs
MPLS LDP forwarding prefix rewrite summary
**type**\: :py:class:`Pfxs <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm.Pfxs>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.vrf_name = None
self.intfs_fwd_count = None
self.local_lbls = None
self.nhs = MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm.Nhs()
self.nhs.parent = self
self.pfxs = MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm.Pfxs()
self.pfxs.parent = self
class Pfxs(object):
"""
MPLS LDP forwarding prefix rewrite summary
.. attribute:: ecmp_pfxs
Count of prefixes with ECMP
**type**\: int
**range:** 0..65535
.. attribute:: labeled_pfxs_aggr
Labeled prefix count for all paths
**type**\: :py:class:`LabeledPfxsAggr <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm.Pfxs.LabeledPfxsAggr>`
.. attribute:: labeled_pfxs_backup
Labeled prefix count related to backup paths only
**type**\: :py:class:`LabeledPfxsBackup <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm.Pfxs.LabeledPfxsBackup>`
.. attribute:: labeled_pfxs_primary
Labeled prefix count related to primary paths only
**type**\: :py:class:`LabeledPfxsPrimary <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm.Pfxs.LabeledPfxsPrimary>`
.. attribute:: protected_pfxs
Count of FRR protected prefixes
**type**\: int
**range:** 0..65535
.. attribute:: total_pfxs
Total Prefix count
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.ecmp_pfxs = None
self.labeled_pfxs_aggr = MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm.Pfxs.LabeledPfxsAggr()
self.labeled_pfxs_aggr.parent = self
self.labeled_pfxs_backup = MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm.Pfxs.LabeledPfxsBackup()
self.labeled_pfxs_backup.parent = self
self.labeled_pfxs_primary = MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm.Pfxs.LabeledPfxsPrimary()
self.labeled_pfxs_primary.parent = self
self.protected_pfxs = None
self.total_pfxs = None
class LabeledPfxsAggr(object):
"""
Labeled prefix count for all paths
.. attribute:: labeled_pfxs
Count of labeled prefixes with 1 or more paths labeled
**type**\: int
**range:** 0..65535
.. attribute:: labeled_pfxs_partial
Count of labeled prefixes with some (but not ALL) paths labeled
**type**\: int
**range:** 0..65535
.. attribute:: unlabeled_pfxs
Count of prefixes with ALL paths unlabeled
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.labeled_pfxs = None
self.labeled_pfxs_partial = None
self.unlabeled_pfxs = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:labeled-pfxs-aggr'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.labeled_pfxs is not None:
return True
if self.labeled_pfxs_partial is not None:
return True
if self.unlabeled_pfxs is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm.Pfxs.LabeledPfxsAggr']['meta_info']
class LabeledPfxsPrimary(object):
"""
Labeled prefix count related to primary paths
only
.. attribute:: labeled_pfxs
Count of labeled prefixes with 1 or more paths labeled
**type**\: int
**range:** 0..65535
.. attribute:: labeled_pfxs_partial
Count of labeled prefixes with some (but not ALL) paths labeled
**type**\: int
**range:** 0..65535
.. attribute:: unlabeled_pfxs
Count of prefixes with ALL paths unlabeled
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.labeled_pfxs = None
self.labeled_pfxs_partial = None
self.unlabeled_pfxs = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:labeled-pfxs-primary'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.labeled_pfxs is not None:
return True
if self.labeled_pfxs_partial is not None:
return True
if self.unlabeled_pfxs is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm.Pfxs.LabeledPfxsPrimary']['meta_info']
class LabeledPfxsBackup(object):
"""
Labeled prefix count related to backup paths
only
.. attribute:: labeled_pfxs
Count of labeled prefixes with 1 or more paths labeled
**type**\: int
**range:** 0..65535
.. attribute:: labeled_pfxs_partial
Count of labeled prefixes with some (but not ALL) paths labeled
**type**\: int
**range:** 0..65535
.. attribute:: unlabeled_pfxs
Count of prefixes with ALL paths unlabeled
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.labeled_pfxs = None
self.labeled_pfxs_partial = None
self.unlabeled_pfxs = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:labeled-pfxs-backup'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.labeled_pfxs is not None:
return True
if self.labeled_pfxs_partial is not None:
return True
if self.unlabeled_pfxs is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm.Pfxs.LabeledPfxsBackup']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:pfxs'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.ecmp_pfxs is not None:
return True
if self.labeled_pfxs_aggr is not None and self.labeled_pfxs_aggr._has_data():
return True
if self.labeled_pfxs_backup is not None and self.labeled_pfxs_backup._has_data():
return True
if self.labeled_pfxs_primary is not None and self.labeled_pfxs_primary._has_data():
return True
if self.protected_pfxs is not None:
return True
if self.total_pfxs is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm.Pfxs']['meta_info']
class Nhs(object):
"""
MPLS LDP forwarding rewrite next\-hop/path summary
.. attribute:: backup_paths
Count of non\-primary backup paths
**type**\: int
**range:** 0..4294967295
.. attribute:: labeled_backup_paths
Count of labeled backup paths
**type**\: int
**range:** 0..4294967295
.. attribute:: labeled_paths
Count of all labeled paths
**type**\: int
**range:** 0..4294967295
.. attribute:: protected_paths
Count of FRR protected paths
**type**\: int
**range:** 0..4294967295
.. attribute:: remote_backup_paths
Count of non\-primary remote backup paths
**type**\: int
**range:** 0..4294967295
.. attribute:: total_paths
Total path count
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.backup_paths = None
self.labeled_backup_paths = None
self.labeled_paths = None
self.protected_paths = None
self.remote_backup_paths = None
self.total_paths = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:nhs'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.backup_paths is not None:
return True
if self.labeled_backup_paths is not None:
return True
if self.labeled_paths is not None:
return True
if self.protected_paths is not None:
return True
if self.remote_backup_paths is not None:
return True
if self.total_paths is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm.Nhs']['meta_info']
@property
def _common_path(self):
if self.vrf_name is None:
raise YPYModelError('Key property vrf_name is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:forwarding/Cisco-IOS-XE-mpls-ldp:forwarding-vrf-summs/Cisco-IOS-XE-mpls-ldp:forwarding-vrf-summ[Cisco-IOS-XE-mpls-ldp:vrf-name = ' + str(self.vrf_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.vrf_name is not None:
return True
if self.intfs_fwd_count is not None:
return True
if self.local_lbls is not None:
return True
if self.nhs is not None and self.nhs._has_data():
return True
if self.pfxs is not None and self.pfxs._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms.ForwardingVrfSumm']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:forwarding/Cisco-IOS-XE-mpls-ldp:forwarding-vrf-summs'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.forwarding_vrf_summ is not None:
for child_ref in self.forwarding_vrf_summ:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingVrfSumms']['meta_info']
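# Illustrative sketch: the per-VRF forwarding summary splits labeled-prefix
# counts into aggregate, primary-path, and backup-path views; the aggregate
# breaks down as fully labeled, partially labeled, and unlabeled prefixes.
# Assumes `ldp` was read as in the sketch after the Parameters class.
#
#     summs = ldp.mpls_ldp_state.forwarding.forwarding_vrf_summs
#     for summ in summs.forwarding_vrf_summ:
#         aggr = summ.pfxs.labeled_pfxs_aggr
#         print(summ.vrf_name, summ.pfxs.total_pfxs,
#               aggr.labeled_pfxs, aggr.labeled_pfxs_partial,
#               aggr.unlabeled_pfxs)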
class ForwardingDetail(object):
"""
This leaf contains the individual LDP forwarding rewrite
for a single prefix.
.. attribute:: vrf_name <key>
This contains the VRF Name, where 'default' is used for the default vrf
**type**\: str
.. attribute:: prefix <key>
The IP Prefix
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(/(([0\-9])\|([0\-9]{2})\|(1[0\-1][0\-9])\|(12[0\-8])))
----
.. attribute:: fwd_prefix
This is the MPLS LDP Forward IP Prefix
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: paths
MPLS LDP Forwarding Path info
**type**\: list of :py:class:`Paths <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths>`
.. attribute:: prefix_length
Prefix length
**type**\: int
**range:** 0..255
.. attribute:: route
MPLS LDP Forwarding Route information
**type**\: :py:class:`Route <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Route>`
.. attribute:: table_id
Table ID associated with IP prefix
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.vrf_name = None
self.prefix = None
self.fwd_prefix = None
self.paths = YList()
self.paths.parent = self
self.paths.name = 'paths'
self.prefix_length = None
self.route = MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Route()
self.route.parent = self
self.table_id = None
class Route(object):
"""
MPLS LDP Forwarding Route information
.. attribute:: forwarding_update_age
Last Forwarding update age in nanoseconds
**type**\: int
**range:** 0..18446744073709551615
**units**\: nanoseconds
.. attribute:: forwarding_update_count
Number of forwarding updates
**type**\: int
**range:** 0..4294967295
.. attribute:: forwarding_update_timestamp
Last Forwarding update timestamp in nanoseconds
**type**\: int
**range:** 0..18446744073709551615
**units**\: nanoseconds
.. attribute:: is_local_vrf_leaked
Is this route leaked across local VRFs?
**type**\: bool
.. attribute:: local_label
Local label
**type**\: int
**range:** 0..4294967295
.. attribute:: metric
Route metric
**type**\: int
**range:** 0..4294967295
.. attribute:: priority
Route priority
**type**\: int
**range:** 0..255
.. attribute:: routing_update_age
Last Routing update age in nanoseconds
**type**\: int
**range:** 0..18446744073709551615
**units**\: nanoseconds
.. attribute:: routing_update_count
Number of routing updates
**type**\: int
**range:** 0..4294967295
.. attribute:: routing_update_timestamp
Last Routing update timestamp in nanoseconds
**type**\: int
**range:** 0..18446744073709551615
**units**\: nanoseconds
.. attribute:: source
Route source protocol Id
**type**\: int
**range:** 0..65535
.. attribute:: type
Route type
**type**\: int
**range:** 0..65535
.. attribute:: version
Route RIB version
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.forwarding_update_age = None
self.forwarding_update_count = None
self.forwarding_update_timestamp = None
self.is_local_vrf_leaked = None
self.local_label = None
self.metric = None
self.priority = None
self.routing_update_age = None
self.routing_update_count = None
self.routing_update_timestamp = None
self.source = None
self.type = None
self.version = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:route'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.forwarding_update_age is not None:
return True
if self.forwarding_update_count is not None:
return True
if self.forwarding_update_timestamp is not None:
return True
if self.is_local_vrf_leaked is not None:
return True
if self.local_label is not None:
return True
if self.metric is not None:
return True
if self.priority is not None:
return True
if self.routing_update_age is not None:
return True
if self.routing_update_count is not None:
return True
if self.routing_update_timestamp is not None:
return True
if self.source is not None:
return True
if self.type is not None:
return True
if self.version is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Route']['meta_info']
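# Sketch of the _has_data() contract used throughout this module: an object
# reports data as soon as any of its leaves is set to a non-None value.
# The label value below is a hypothetical example.
#
#   route = MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Route()
#   route._has_data()          # False: nothing populated yet
#   route.local_label = 24005
#   route._has_data()          # True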
class Paths(object):
"""
MPLS LDP Forwarding Path info
.. attribute:: mpls
MPLS LDP Forwarding Path MPLS information
**type**\: :py:class:`Mpls <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls>`
.. attribute:: routing
MPLS LDP Forwarding Path IP Routing information
**type**\: :py:class:`Routing <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Routing>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.mpls = MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls()
self.mpls.parent = self
self.routing = MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Routing()
self.routing.parent = self
class Routing(object):
"""
MPLS LDP Forwarding Path IP Routing information
.. attribute:: bkup_path_id
Backup path Id
**type**\: int
**range:** 0..255
.. attribute:: has_remote_lfa_bkup
This is true if the path has a remote LFA backup
**type**\: bool
.. attribute:: interface
This is the interface
**type**\: str
**refers to**\: :py:class:`name <ydk.models.ietf.ietf_interfaces.Interfaces.Interface>`
.. attribute:: load_metric
Path's load metric for load balancing
**type**\: int
**range:** 0..4294967295
.. attribute:: next_hop
This is the Next Hop address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: next_hop_table_id
Table ID for nexthop address
**type**\: int
**range:** 0..4294967295
.. attribute:: nexthop_id
Nexthop Identifier
**type**\: int
**range:** 0..4294967295
.. attribute:: nh_is_overriden
This is set when the nexthop is overridden by LDP
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: path_id
path Id
**type**\: int
**range:** 0..255
.. attribute:: path_type
Routing path type
**type**\: :py:class:`RoutePathTypeIdentity <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.RoutePathTypeIdentity>`
.. attribute:: remote_node_id
This is the Remote/PQ node address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.bkup_path_id = None
self.has_remote_lfa_bkup = None
self.interface = None
self.load_metric = None
self.next_hop = None
self.next_hop_table_id = None
self.nexthop_id = None
self.nh_is_overriden = None
self.path_id = None
self.path_type = None
self.remote_node_id = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:routing'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.bkup_path_id is not None:
return True
if self.has_remote_lfa_bkup is not None:
return True
if self.interface is not None:
return True
if self.load_metric is not None:
return True
if self.next_hop is not None:
return True
if self.next_hop_table_id is not None:
return True
if self.nexthop_id is not None:
return True
if self.nh_is_overriden is not None:
return True
if self.path_id is not None:
return True
if self.path_type is not None:
return True
if self.remote_node_id is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Routing']['meta_info']
class Mpls(object):
"""
MPLS LDP Forwarding Path MPLS information
.. attribute:: mpls_outgoing_info
MPLS nexthop info
**type**\: :py:class:`MplsOutgoingInfo <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls.MplsOutgoingInfo>`
.. attribute:: remote_lfa
MPLS LDP Forwarding Path Remote LFA\-FRR backup MPLS info
**type**\: :py:class:`RemoteLfa <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls.RemoteLfa>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.mpls_outgoing_info = MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls.MplsOutgoingInfo()
self.mpls_outgoing_info.parent = self
self.remote_lfa = MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls.RemoteLfa()
self.remote_lfa.parent = self
class MplsOutgoingInfo(object):
"""
MPLS nexthop info
.. attribute:: is_from_graceful_restartable_neighbor
Is from a GR neighbor
**type**\: bool
.. attribute:: is_stale
Is the entry stale? This may happen during a graceful restart
**type**\: bool
.. attribute:: nexthop_peer_ldp_ident
Nexthop LDP peer
**type**\: :py:class:`NexthopPeerLdpIdent <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls.MplsOutgoingInfo.NexthopPeerLdpIdent>`
.. attribute:: out_label
Outgoing label
**type**\: int
**range:** 0..4294967295
.. attribute:: out_label_owner
Outgoing label owner
**type**\: :py:class:`RoutePathLblOwnerIdentity <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.RoutePathLblOwnerIdentity>`
.. attribute:: out_label_type
Outgoing Label Type
**type**\: :py:class:`LabelTypeIdentity <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.LabelTypeIdentity>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.is_from_graceful_restartable_neighbor = None
self.is_stale = None
self.nexthop_peer_ldp_ident = MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls.MplsOutgoingInfo.NexthopPeerLdpIdent()
self.nexthop_peer_ldp_ident.parent = self
self.out_label = None
self.out_label_owner = None
self.out_label_type = None
class NexthopPeerLdpIdent(object):
"""
Nexthop LDP peer
.. attribute:: label_space_id
Label space identifier
**type**\: int
**range:** 0..65535
.. attribute:: lsr_id
LSR identifier
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.label_space_id = None
self.lsr_id = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:nexthop-peer-ldp-ident'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.label_space_id is not None:
return True
if self.lsr_id is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls.MplsOutgoingInfo.NexthopPeerLdpIdent']['meta_info']
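# Background note: an LDP identifier is conventionally written as
# <lsr_id>:<label_space_id> (RFC 5036), e.g. '10.1.1.1:0' for a
# platform-wide label space. Hypothetical population sketch:
#
#   ident = (MplsLdp.MplsLdpState.Forwarding.ForwardingDetail
#            .Paths.Mpls.MplsOutgoingInfo.NexthopPeerLdpIdent())
#   ident.lsr_id = '10.1.1.1'
#   ident.label_space_id = 0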
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:mpls-outgoing-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.is_from_graceful_restartable_neighbor is not None:
return True
if self.is_stale is not None:
return True
if self.nexthop_peer_ldp_ident is not None and self.nexthop_peer_ldp_ident._has_data():
return True
if self.out_label is not None:
return True
if self.out_label_owner is not None:
return True
if self.out_label_type is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls.MplsOutgoingInfo']['meta_info']
class RemoteLfa(object):
"""
MPLS LDP Forwarding Path Remote LFA\-FRR backup
MPLS info
.. attribute:: has_remote_lfa_bkup
Whether the path has a remote LFA backup
**type**\: bool
.. attribute:: mpls_outgoing_info
Remote LFA MPLS nexthop info
**type**\: :py:class:`MplsOutgoingInfo <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls.RemoteLfa.MplsOutgoingInfo>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.has_remote_lfa_bkup = None
self.mpls_outgoing_info = MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls.RemoteLfa.MplsOutgoingInfo()
self.mpls_outgoing_info.parent = self
class MplsOutgoingInfo(object):
"""
Remote LFA MPLS nexthop info
.. attribute:: is_from_graceful_restartable_neighbor
Is from a GR neighbor
**type**\: bool
.. attribute:: is_stale
Is the entry stale? This may happen during a graceful restart
**type**\: bool
.. attribute:: nexthop_peer_ldp_ident
Nexthop LDP peer
**type**\: :py:class:`NexthopPeerLdpIdent <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls.RemoteLfa.MplsOutgoingInfo.NexthopPeerLdpIdent>`
.. attribute:: out_label
Outgoing label
**type**\: int
**range:** 0..4294967295
.. attribute:: out_label_owner
Outgoing label owner
**type**\: :py:class:`RoutePathLblOwnerIdentity <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.RoutePathLblOwnerIdentity>`
.. attribute:: out_label_type
Outgoing Label Type
**type**\: :py:class:`LabelTypeIdentity <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.LabelTypeIdentity>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.is_from_graceful_restartable_neighbor = None
self.is_stale = None
self.nexthop_peer_ldp_ident = MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls.RemoteLfa.MplsOutgoingInfo.NexthopPeerLdpIdent()
self.nexthop_peer_ldp_ident.parent = self
self.out_label = None
self.out_label_owner = None
self.out_label_type = None
class NexthopPeerLdpIdent(object):
"""
Nexthop LDP peer
.. attribute:: label_space_id
Label space identifier
**type**\: int
**range:** 0..65535
.. attribute:: lsr_id
LSR identifier
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.label_space_id = None
self.lsr_id = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:nexthop-peer-ldp-ident'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.label_space_id is not None:
return True
if self.lsr_id is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls.RemoteLfa.MplsOutgoingInfo.NexthopPeerLdpIdent']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:mpls-outgoing-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.is_from_graceful_restartable_neighbor is not None:
return True
if self.is_stale is not None:
return True
if self.nexthop_peer_ldp_ident is not None and self.nexthop_peer_ldp_ident._has_data():
return True
if self.out_label is not None:
return True
if self.out_label_owner is not None:
return True
if self.out_label_type is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls.RemoteLfa.MplsOutgoingInfo']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:remote-lfa'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.has_remote_lfa_bkup is not None:
return True
if self.mpls_outgoing_info is not None and self.mpls_outgoing_info._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls.RemoteLfa']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:mpls'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.mpls_outgoing_info is not None and self.mpls_outgoing_info._has_data():
return True
if self.remote_lfa is not None and self.remote_lfa._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths.Mpls']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:paths'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.mpls is not None and self.mpls._has_data():
return True
if self.routing is not None and self.routing._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingDetail.Paths']['meta_info']
@property
def _common_path(self):
if self.vrf_name is None:
raise YPYModelError('Key property vrf_name is None')
if self.prefix is None:
raise YPYModelError('Key property prefix is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:forwarding/Cisco-IOS-XE-mpls-ldp:forwarding-detail[Cisco-IOS-XE-mpls-ldp:vrf-name = ' + str(self.vrf_name) + '][Cisco-IOS-XE-mpls-ldp:prefix = ' + str(self.prefix) + ']'
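# Sketch of the XPath the property above produces for hypothetical key
# values vrf_name='default' and prefix='10.0.0.0/8' (wrapped here for
# readability; the property returns one string):
#
#   /Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state
#   /Cisco-IOS-XE-mpls-ldp:forwarding/Cisco-IOS-XE-mpls-ldp:forwarding-detail
#   [Cisco-IOS-XE-mpls-ldp:vrf-name = default][Cisco-IOS-XE-mpls-ldp:prefix = 10.0.0.0/8]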
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.vrf_name is not None:
return True
if self.prefix is not None:
return True
if self.fwd_prefix is not None:
return True
if self.paths is not None:
for child_ref in self.paths:
if child_ref._has_data():
return True
if self.prefix_length is not None:
return True
if self.route is not None and self.route._has_data():
return True
if self.table_id is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding.ForwardingDetail']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:forwarding'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.forwarding_detail is not None:
for child_ref in self.forwarding_detail:
if child_ref._has_data():
return True
if self.forwarding_vrf_summs is not None and self.forwarding_vrf_summs._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Forwarding']['meta_info']
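# Minimal retrieval sketch, assuming a YDK 0.x runtime and a reachable
# NETCONF-enabled device; the address and credentials below are placeholders.
#
#   from ydk.services import CRUDService
#   from ydk.providers import NetconfServiceProvider
#   from ydk.models.cisco_ios_xe import Cisco_IOS_XE_mpls_ldp
#
#   provider = NetconfServiceProvider(address='10.0.0.1',
#                                     username='admin', password='admin')
#   crud = CRUDService()
#   ldp = crud.read(provider, Cisco_IOS_XE_mpls_ldp.MplsLdp())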
class Bindings(object):
"""
The detailed LDP Bindings.
.. attribute:: binding
This list contains the MPLS LDP Label Bindings for each IP Prefix. Label bindings provide the local MPLS Label, a list of remote labels, any filters affecting advertisement of that prefix, and a list of neighbors to which the label has been advertised
**type**\: list of :py:class:`Binding <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Bindings.Binding>`
.. attribute:: bindings_sum_afs
This container holds the bindings specific to this VRF and AF
**type**\: :py:class:`BindingsSumAfs <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Bindings.BindingsSumAfs>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.binding = YList()
self.binding.parent = self
self.binding.name = 'binding'
self.bindings_sum_afs = MplsLdp.MplsLdpState.Bindings.BindingsSumAfs()
self.bindings_sum_afs.parent = self
class BindingsSumAfs(object):
"""
This container holds the bindings specific to this VRF
and AF.
.. attribute:: binding_sum_af
Counters for the LDP Label Information Base for this VRF/AF
**type**\: list of :py:class:`BindingSumAf <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Bindings.BindingsSumAfs.BindingSumAf>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.binding_sum_af = YList()
self.binding_sum_af.parent = self
self.binding_sum_af.name = 'binding_sum_af'
class BindingSumAf(object):
"""
Counters for the LDP Label Information Base for this
VRF/AF.
.. attribute:: vrf_name <key>
This contains the VRF Name, where 'default' is used for the default VRF
**type**\: str
.. attribute:: af_name <key>
Address Family name
**type**\: :py:class:`AfEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.AfEnum>`
.. attribute:: binding_local
Number of local bindings
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_local_explicit_null
Number of local explicit null bindings
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_local_implicit_null
Number of local implicit null bindings
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_local_no_route
Local bindings with no route
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_local_non_null
Number of local non\-null bindings
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_local_null
Number of local null bindings
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_local_oor
This is the number of local bindings needing a label that hit the Out\-Of\-Resource condition
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_no_route
Bindings with no route
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_remote
Number of remote bindings
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_total
Total bindings
**type**\: int
**range:** 0..4294967295
.. attribute:: highest_allocated_label
Highest allocated label
**type**\: int
**range:** 0..4294967295
.. attribute:: lowest_allocated_label
Lowest allocated label
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.vrf_name = None
self.af_name = None
self.binding_local = None
self.binding_local_explicit_null = None
self.binding_local_implicit_null = None
self.binding_local_no_route = None
self.binding_local_non_null = None
self.binding_local_null = None
self.binding_local_oor = None
self.binding_no_route = None
self.binding_remote = None
self.binding_total = None
self.highest_allocated_label = None
self.lowest_allocated_label = None
@property
def _common_path(self):
if self.vrf_name is None:
raise YPYModelError('Key property vrf_name is None')
if self.af_name is None:
raise YPYModelError('Key property af_name is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:bindings/Cisco-IOS-XE-mpls-ldp:bindings-sum-afs/Cisco-IOS-XE-mpls-ldp:binding-sum-af[Cisco-IOS-XE-mpls-ldp:vrf-name = ' + str(self.vrf_name) + '][Cisco-IOS-XE-mpls-ldp:af-name = ' + str(self.af_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.vrf_name is not None:
return True
if self.af_name is not None:
return True
if self.binding_local is not None:
return True
if self.binding_local_explicit_null is not None:
return True
if self.binding_local_implicit_null is not None:
return True
if self.binding_local_no_route is not None:
return True
if self.binding_local_non_null is not None:
return True
if self.binding_local_null is not None:
return True
if self.binding_local_oor is not None:
return True
if self.binding_no_route is not None:
return True
if self.binding_remote is not None:
return True
if self.binding_total is not None:
return True
if self.highest_allocated_label is not None:
return True
if self.lowest_allocated_label is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Bindings.BindingsSumAfs.BindingSumAf']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:bindings/Cisco-IOS-XE-mpls-ldp:bindings-sum-afs'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.binding_sum_af is not None:
for child_ref in self.binding_sum_af:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Bindings.BindingsSumAfs']['meta_info']
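# Sketch: walking the per-VRF/AF binding counters after a read. `state` is a
# hypothetical, already-populated MplsLdp.MplsLdpState instance.
#
#   for summ in state.bindings.bindings_sum_afs.binding_sum_af:
#       print(summ.vrf_name, summ.af_name,
#             'local:', summ.binding_local,
#             'remote:', summ.binding_remote,
#             'total:', summ.binding_total)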
class Binding(object):
"""
This list contains the MPLS LDP Label Bindings for each
IP Prefix. Label bindings provide the local MPLS Label,
a list of remote labels, any filters affecting
advertisement of that prefix, and a list of neighbors to
which the label has been advertised.
.. attribute:: vrf_name <key>
This contains the VRF Name, where 'default' is used for the default VRF
**type**\: str
.. attribute:: prefix <key>
This leaf contains the IP Prefix being bound
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(/(([0\-9])\|([0\-9]{2})\|(1[0\-1][0\-9])\|(12[0\-8])))
----
.. attribute:: advertise_lsr_filter
This contains the filter name for this binding's Advertise LSR. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
.. attribute:: advertise_prefix_filter
This contains the filter name for this binding's prefix. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
.. attribute:: config_enforced_local_label_value
Config/User enforced local label value
**type**\: bool
.. attribute:: fwd_prefix
This is the MPLS LDP Binding IP Prefix
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: is_no_route
This is true if the MPLS LDP Binding has no route
**type**\: bool
.. attribute:: label_oor
This is true if the MPLS LDP Binding label space is depleted (Out Of Resource) and no new labels can be allocated
**type**\: bool
.. attribute:: le_local_binding_revision
This is the MPLS LDP Binding Local Binding revision
**type**\: int
**range:** 0..4294967295
.. attribute:: le_local_label_state
This is the MPLS LDP Binding Local label state
**type**\: :py:class:`LocalLabelStateEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.LocalLabelStateEnum>`
.. attribute:: local_label
This is the MPLS LDP Binding Local label
**type**\: int
**range:** 0..4294967295
.. attribute:: peers_advertised_to
Peers to which this entry is advertised
**type**\: list of :py:class:`PeersAdvertisedTo <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Bindings.Binding.PeersAdvertisedTo>`
.. attribute:: prefix_length
This is the MPLS LDP Binding Prefix Length
**type**\: int
**range:** 0..255
.. attribute:: remote_binding
MPLS LDP Remote Binding Information
**type**\: list of :py:class:`RemoteBinding <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Bindings.Binding.RemoteBinding>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.vrf_name = None
self.prefix = None
self.advertise_lsr_filter = None
self.advertise_prefix_filter = None
self.config_enforced_local_label_value = None
self.fwd_prefix = None
self.is_no_route = None
self.label_oor = None
self.le_local_binding_revision = None
self.le_local_label_state = None
self.local_label = None
self.peers_advertised_to = YList()
self.peers_advertised_to.parent = self
self.peers_advertised_to.name = 'peers_advertised_to'
self.prefix_length = None
self.remote_binding = YList()
self.remote_binding.parent = self
self.remote_binding.name = 'remote_binding'
class RemoteBinding(object):
"""
MPLS LDP Remote Binding Information
.. attribute:: assigning_peer_ldp_ident
Assigning peer
**type**\: :py:class:`AssigningPeerLdpIdent <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Bindings.Binding.RemoteBinding.AssigningPeerLdpIdent>`
.. attribute:: is_stale
Is the entry stale?
**type**\: bool
.. attribute:: remote_label
This is the remote Label
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.assigning_peer_ldp_ident = MplsLdp.MplsLdpState.Bindings.Binding.RemoteBinding.AssigningPeerLdpIdent()
self.assigning_peer_ldp_ident.parent = self
self.is_stale = None
self.remote_label = None
class AssigningPeerLdpIdent(object):
"""
Assigning peer
.. attribute:: label_space_id
Label space identifier
**type**\: int
**range:** 0..65535
.. attribute:: lsr_id
LSR identifier
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.label_space_id = None
self.lsr_id = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:assigning-peer-ldp-ident'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.label_space_id is not None:
return True
if self.lsr_id is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Bindings.Binding.RemoteBinding.AssigningPeerLdpIdent']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:remote-binding'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.assigning_peer_ldp_ident is not None and self.assigning_peer_ldp_ident._has_data():
return True
if self.is_stale is not None:
return True
if self.remote_label is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Bindings.Binding.RemoteBinding']['meta_info']
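# Sketch: listing the remote labels learned for one binding. `binding` is a
# hypothetical, already-populated Bindings.Binding entry.
#
#   for rb in binding.remote_binding:
#       peer = rb.assigning_peer_ldp_ident
#       print('%s:%s -> label %s (stale: %s)' % (
#           peer.lsr_id, peer.label_space_id, rb.remote_label, rb.is_stale))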
class PeersAdvertisedTo(object):
"""
Peers to which this entry is advertised.
.. attribute:: label_space_id
Label space identifier
**type**\: int
**range:** 0..65535
.. attribute:: lsr_id
LSR identifier
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.label_space_id = None
self.lsr_id = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:peers-advertised-to'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.label_space_id is not None:
return True
if self.lsr_id is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Bindings.Binding.PeersAdvertisedTo']['meta_info']
@property
def _common_path(self):
if self.vrf_name is None:
raise YPYModelError('Key property vrf_name is None')
if self.prefix is None:
raise YPYModelError('Key property prefix is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:bindings/Cisco-IOS-XE-mpls-ldp:binding[Cisco-IOS-XE-mpls-ldp:vrf-name = ' + str(self.vrf_name) + '][Cisco-IOS-XE-mpls-ldp:prefix = ' + str(self.prefix) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.vrf_name is not None:
return True
if self.prefix is not None:
return True
if self.advertise_lsr_filter is not None:
return True
if self.advertise_prefix_filter is not None:
return True
if self.config_enforced_local_label_value is not None:
return True
if self.fwd_prefix is not None:
return True
if self.is_no_route is not None:
return True
if self.label_oor is not None:
return True
if self.le_local_binding_revision is not None:
return True
if self.le_local_label_state is not None:
return True
if self.local_label is not None:
return True
if self.peers_advertised_to is not None:
for child_ref in self.peers_advertised_to:
if child_ref._has_data():
return True
if self.prefix_length is not None:
return True
if self.remote_binding is not None:
for child_ref in self.remote_binding:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Bindings.Binding']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:bindings'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.binding is not None:
for child_ref in self.binding:
if child_ref._has_data():
return True
if self.bindings_sum_afs is not None and self.bindings_sum_afs._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Bindings']['meta_info']
class Neighbors(object):
"""
The LDP Neighbors Information
.. attribute:: backoffs
LDP Backoff Information
**type**\: :py:class:`Backoffs <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.Backoffs>`
.. attribute:: nbr_adjs
For this Neighbor, this is the list of adjacencies between the neighbor and the local node
**type**\: list of :py:class:`NbrAdjs <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.NbrAdjs>`
.. attribute:: neighbor
Information on a particular LDP neighbor
**type**\: list of :py:class:`Neighbor <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.Neighbor>`
.. attribute:: nsr_nbr_detail
This is the LDP NSR state for this neighbor
**type**\: :py:class:`NsrNbrDetail <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.NsrNbrDetail>`
.. attribute:: stats_info
MPLS LDP Statistics Information
**type**\: :py:class:`StatsInfo <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.StatsInfo>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.backoffs = MplsLdp.MplsLdpState.Neighbors.Backoffs()
self.backoffs.parent = self
self.nbr_adjs = YList()
self.nbr_adjs.parent = self
self.nbr_adjs.name = 'nbr_adjs'
self.neighbor = YList()
self.neighbor.parent = self
self.neighbor.name = 'neighbor'
self.nsr_nbr_detail = MplsLdp.MplsLdpState.Neighbors.NsrNbrDetail()
self.nsr_nbr_detail.parent = self
self.stats_info = MplsLdp.MplsLdpState.Neighbors.StatsInfo()
self.stats_info.parent = self
class Neighbor(object):
"""
Information on a particular LDP neighbor
.. attribute:: vrf_name <key>
This contains the VRF Name, where 'default' is used for the default VRF
**type**\: str
.. attribute:: lsr_id <key>
LSR ID of neighbor
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: advertise_bgp_prefixes
True if BGP labeled prefixes are advertised to the neighbor
**type**\: bool
.. attribute:: bgp_advertisement_state
BGP labeled prefixes advertisement state
**type**\: :py:class:`NbrBgpAdvtStateEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.NbrBgpAdvtStateEnum>`
.. attribute:: capabilities
Capabilities sent to and received from neighbor
**type**\: :py:class:`Capabilities <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.Neighbor.Capabilities>`
.. attribute:: client
Targeted Session clients
**type**\: list of str
.. attribute:: downstream_on_demand
True if the label advertisement mode is Downstream On Demand
**type**\: bool
.. attribute:: duplicate_address
Duplicate IPv4/IPv6 addresses bound to this peer
**type**\: one of the below types:
**type**\: list of str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: list of str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: graceful_restart_adjacency
This container holds the graceful restart information for this adjacency
**type**\: :py:class:`GracefulRestartAdjacency <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.Neighbor.GracefulRestartAdjacency>`
.. attribute:: has_sp
Session Protection enabled
**type**\: bool
.. attribute:: inbound_ipv4
This contains the IPv4 Inbound accept filter name. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
**length:** 0..80
.. attribute:: inbound_ipv6_filter
This contains the IPv6 Inbound accept filter name. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
**length:** 0..80
.. attribute:: label_space_id
Label space ID of neighbor
**type**\: int
**range:** 0..65535
.. attribute:: nbr_bound_address
This is the MPLS LDP Neighbor Bound IPv4/IPv6 Address
**type**\: one of the below types:
**type**\: list of str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: list of str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: nbr_path_vector_limit
If the value of this object is 0 (zero), then Loop Detection for Path Vectors is disabled for this neighbor. Otherwise, Loop Detection for Path Vectors is enabled for this neighbor and the Path Vector Limit is this value
**type**\: int
**range:** 0..255
.. attribute:: nbr_stats
Neighbor Statistics
**type**\: :py:class:`NbrStats <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.Neighbor.NbrStats>`
.. attribute:: outbound_ipv4_filter
This contains the IPv4 Outbound advertise filter name. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
**length:** 0..80
.. attribute:: outbound_ipv6_filter
This contains the IPv6 Outbound advertise filter name. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
**length:** 0..80
.. attribute:: peer_hold_time
Session holdtime value in seconds from the peer
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: peer_keep_alive_interval
Session keepalive interval in seconds from the peer
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: peer_state
LDP adjacency peer state
**type**\: :py:class:`AdjStateEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.AdjStateEnum>`
.. attribute:: session_prot_ver
The version of the LDP protocol which this session is using, as negotiated during session initialization
**type**\: int
**range:** 1..65535
.. attribute:: session_role
During session establishment the LSR/LER takes either the active role or the passive role based on address comparisons. This object indicates whether this LSR/LER was behaving in an active role or a passive role during this session's establishment. The value unknown(1) indicates that the role cannot be determined at the present time
**type**\: :py:class:`SessionRoleEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.Neighbor.SessionRoleEnum>`
.. attribute:: sp_duration
Session protection holdup time duration in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: sp_filter
This contains the Session Protection filter name. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
**length:** 0..80
.. attribute:: sp_has_duration
Session Protection has non\-default duration
**type**\: bool
.. attribute:: sp_state
Session Protection state
**type**\: str
**length:** 0..80
.. attribute:: spht_remaining
Session Protection holdup time remaining value in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: spht_running
Session Protection holdup timer is running
**type**\: bool
.. attribute:: tcp_information
MPLS LDP Neighbor TCP Information
**type**\: :py:class:`TcpInformation <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.Neighbor.TcpInformation>`
.. attribute:: up_time_seconds
Up time in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.vrf_name = None
self.lsr_id = None
self.advertise_bgp_prefixes = None
self.bgp_advertisement_state = None
self.capabilities = MplsLdp.MplsLdpState.Neighbors.Neighbor.Capabilities()
self.capabilities.parent = self
self.client = YLeafList()
self.client.parent = self
self.client.name = 'client'
self.downstream_on_demand = None
self.duplicate_address = YLeafList()
self.duplicate_address.parent = self
self.duplicate_address.name = 'duplicate_address'
self.graceful_restart_adjacency = MplsLdp.MplsLdpState.Neighbors.Neighbor.GracefulRestartAdjacency()
self.graceful_restart_adjacency.parent = self
self.has_sp = None
self.inbound_ipv4 = None
self.inbound_ipv6_filter = None
self.label_space_id = None
self.nbr_bound_address = YLeafList()
self.nbr_bound_address.parent = self
self.nbr_bound_address.name = 'nbr_bound_address'
self.nbr_path_vector_limit = None
self.nbr_stats = MplsLdp.MplsLdpState.Neighbors.Neighbor.NbrStats()
self.nbr_stats.parent = self
self.outbound_ipv4_filter = None
self.outbound_ipv6_filter = None
self.peer_hold_time = None
self.peer_keep_alive_interval = None
self.peer_state = None
self.session_prot_ver = None
self.session_role = None
self.sp_duration = None
self.sp_filter = None
self.sp_has_duration = None
self.sp_state = None
self.spht_remaining = None
self.spht_running = None
self.tcp_information = MplsLdp.MplsLdpState.Neighbors.Neighbor.TcpInformation()
self.tcp_information.parent = self
self.up_time_seconds = None
class SessionRoleEnum(Enum):
"""
SessionRoleEnum
During session establishment the LSR/LER takes either
the active role or the passive role based on address
comparisons. This object indicates whether this
LSR/LER was behaving in an active role or passive role
during this session's establishment.
The value unknown(1) indicates that the role cannot
be determined at the present time.
.. data:: unknown = 1
The role of this LSR in the session is unknown.
.. data:: active = 2
The role of this LSR in the session is active.
.. data:: passive = 3
The role of this LSR in the session is passive.
"""
unknown = 1
active = 2
passive = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.Neighbor.SessionRoleEnum']
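# Sketch: enum leaves hold the integer members defined above, so role checks
# compare directly against the class attributes. `nbr` is a hypothetical,
# already-populated Neighbors.Neighbor entry.
#
#   Role = MplsLdp.MplsLdpState.Neighbors.Neighbor.SessionRoleEnum
#   if nbr.session_role == Role.active:
#       print('local LSR took the active role in session setup')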
class NbrStats(object):
"""
Neighbor Statistics.
.. attribute:: num_of_nbr_ipv4_addresses
Number of IPv4 addresses for which the neighbor is advertising labels
**type**\: int
**range:** 0..4294967295
.. attribute:: num_of_nbr_ipv4_discovery
Number of neighbor IPv4 discovery sources
**type**\: int
**range:** 0..4294967295
.. attribute:: num_of_nbr_ipv4_lbl
Number of IPv4 labels the neighbor is advertising
**type**\: int
**range:** 0..4294967295
.. attribute:: num_of_nbr_ipv6_addresses
Number of IPv6 addresses for which the neighbor is advertising labels
**type**\: int
**range:** 0..4294967295
.. attribute:: num_of_nbr_ipv6_discovery
Number of neighbor IPv6 discovery sources
**type**\: int
**range:** 0..4294967295
.. attribute:: num_of_nbr_ipv6_lbl
Number of IPv6 labels the neighbor is advertising
**type**\: int
**range:** 0..4294967295
.. attribute:: ta_pies_rcvd
Number of MPLS LDP messages received from this neighbor
**type**\: int
**range:** 0..4294967295
.. attribute:: ta_pies_sent
Number of MPLS LDP messages sent to this neighbor
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.num_of_nbr_ipv4_addresses = None
self.num_of_nbr_ipv4_discovery = None
self.num_of_nbr_ipv4_lbl = None
self.num_of_nbr_ipv6_addresses = None
self.num_of_nbr_ipv6_discovery = None
self.num_of_nbr_ipv6_lbl = None
self.ta_pies_rcvd = None
self.ta_pies_sent = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:nbr-stats'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.num_of_nbr_ipv4_addresses is not None:
return True
if self.num_of_nbr_ipv4_discovery is not None:
return True
if self.num_of_nbr_ipv4_lbl is not None:
return True
if self.num_of_nbr_ipv6_addresses is not None:
return True
if self.num_of_nbr_ipv6_discovery is not None:
return True
if self.num_of_nbr_ipv6_lbl is not None:
return True
if self.ta_pies_rcvd is not None:
return True
if self.ta_pies_sent is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.Neighbor.NbrStats']['meta_info']
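# Sketch: reading the per-neighbor counters. `nbr` is a hypothetical,
# already-populated Neighbors.Neighbor entry.
#
#   stats = nbr.nbr_stats
#   print('LDP msgs sent/rcvd: %s/%s' % (stats.ta_pies_sent, stats.ta_pies_rcvd))
#   print('IPv4 labels advertised by peer:', stats.num_of_nbr_ipv4_lbl)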
class GracefulRestartAdjacency(object):
"""
This container holds the graceful restart information
for this adjacency.
.. attribute:: down_nbr_down_reason
This identity provides the reason that the LDP Session with this neighbor is down. The reason does not persist if the session was down but is now recovered
**type**\: :py:class:`DownNbrReasonIdentity <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.DownNbrReasonIdentity>`
.. attribute:: down_nbr_flap_count
This is the current count of back\-to\-back flaps
**type**\: int
**range:** 0..4294967295
.. attribute:: is_graceful_restartable
Is this neighbor capable of graceful restart?
**type**\: bool
.. attribute:: is_liveness_timer_running
This is set if the liveness timer is running
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: is_recovery_timer_running
This is set if the recovery timer is running
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: liveness_timer_remaining_seconds
Remaining time from liveness timer in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: reconnect_timeout
This leaf is the reconnect timeout in microseconds
**type**\: int
**range:** 0..4294967295
**units**\: microseconds
.. attribute:: recovery_time
This leaf is the recovery time in microseconds
**type**\: int
**range:** 0..4294967295
**units**\: microseconds
.. attribute:: recovery_timer_remaining_seconds
Recovery timer remaining time in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.down_nbr_down_reason = None
self.down_nbr_flap_count = None
self.is_graceful_restartable = None
self.is_liveness_timer_running = None
self.is_recovery_timer_running = None
self.liveness_timer_remaining_seconds = None
self.reconnect_timeout = None
self.recovery_time = None
self.recovery_timer_remaining_seconds = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:graceful-restart-adjacency'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.down_nbr_down_reason is not None:
return True
if self.down_nbr_flap_count is not None:
return True
if self.is_graceful_restartable is not None:
return True
if self.is_liveness_timer_running is not None:
return True
if self.is_recovery_timer_running is not None:
return True
if self.liveness_timer_remaining_seconds is not None:
return True
if self.reconnect_timeout is not None:
return True
if self.recovery_time is not None:
return True
if self.recovery_timer_remaining_seconds is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.Neighbor.GracefulRestartAdjacency']['meta_info']
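# Sketch: leaves typed Empty (is_liveness_timer_running,
# is_recovery_timer_running) are presence flags; they are either None or an
# ydk.types.Empty instance, so tests reduce to `is not None`, matching the
# _has_data() checks above.
#
#   from ydk.types import Empty
#   gr = MplsLdp.MplsLdpState.Neighbors.Neighbor.GracefulRestartAdjacency()
#   gr.is_recovery_timer_running = Empty()
#   gr._has_data()   # True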
class TcpInformation(object):
"""
MPLS LDP Neighbor TCP Information
.. attribute:: foreign_host
This is the foreign host address used by TCP
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: foreign_port
Foreign port number
**type**\: int
**range:** 0..65535
.. attribute:: is_md5_on
Is MD5 Digest on
**type**\: bool
.. attribute:: local_host
This is the local host address used by TCP
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: local_port
Local port number
**type**\: int
**range:** 0..65535
.. attribute:: up_time
Up time
**type**\: str
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.foreign_host = None
self.foreign_port = None
self.is_md5_on = None
self.local_host = None
self.local_port = None
self.up_time = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:tcp-information'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.foreign_host is not None:
return True
if self.foreign_port is not None:
return True
if self.is_md5_on is not None:
return True
if self.local_host is not None:
return True
if self.local_port is not None:
return True
if self.up_time is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.Neighbor.TcpInformation']['meta_info']
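# Sketch: formatting the session's TCP endpoints. `nbr` is a hypothetical,
# already-populated Neighbors.Neighbor entry; LDP sessions run over TCP port
# 646, so one side of the connection is normally that well-known port.
#
#   tcp = nbr.tcp_information
#   print('%s:%s <-> %s:%s (MD5: %s)' % (tcp.local_host, tcp.local_port,
#         tcp.foreign_host, tcp.foreign_port, tcp.is_md5_on))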
class Capabilities(object):
"""
Capabilities sent to and received from neighbor
.. attribute:: received_caps
List of received capabilities
**type**\: list of :py:class:`ReceivedCaps <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.Neighbor.Capabilities.ReceivedCaps>`
.. attribute:: sent_caps
List of sent capabilities
**type**\: list of :py:class:`SentCaps <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.Neighbor.Capabilities.SentCaps>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.received_caps = YList()
self.received_caps.parent = self
self.received_caps.name = 'received_caps'
self.sent_caps = YList()
self.sent_caps.parent = self
self.sent_caps.name = 'sent_caps'
class SentCaps(object):
"""
List of sent capabilities
.. attribute:: cap_type <key>
Capability type (IANA assigned)
**type**\: int
**range:** 0..65535
.. attribute:: cap_des
Capability description
**type**\: str
**length:** 0..80
.. attribute:: capability_data
Capability data
**type**\: str
.. attribute:: capability_data_length
Capability data length
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.cap_type = None
self.cap_des = None
self.capability_data = None
self.capability_data_length = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.cap_type is None:
raise YPYModelError('Key property cap_type is None')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:sent-caps[Cisco-IOS-XE-mpls-ldp:cap-type = ' + str(self.cap_type) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.cap_type is not None:
return True
if self.cap_des is not None:
return True
if self.capability_data is not None:
return True
if self.capability_data_length is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.Neighbor.Capabilities.SentCaps']['meta_info']
class ReceivedCaps(object):
"""
List of received capabilities
.. attribute:: cap_type <key>
Capability type (IANA assigned)
**type**\: int
**range:** 0..65535
.. attribute:: cap_des
Capability description
**type**\: str
**length:** 0..80
.. attribute:: capability_data
Capability data
**type**\: str
.. attribute:: capability_data_length
Capability data length
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.cap_type = None
self.cap_des = None
self.capability_data = None
self.capability_data_length = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.cap_type is None:
raise YPYModelError('Key property cap_type is None')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:received-caps[Cisco-IOS-XE-mpls-ldp:cap-type = ' + str(self.cap_type) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.cap_type is not None:
return True
if self.cap_des is not None:
return True
if self.capability_data is not None:
return True
if self.capability_data_length is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.Neighbor.Capabilities.ReceivedCaps']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:capabilities'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.received_caps is not None:
for child_ref in self.received_caps:
if child_ref._has_data():
return True
if self.sent_caps is not None:
for child_ref in self.sent_caps:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.Neighbor.Capabilities']['meta_info']
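# Illustrative usage sketch, not part of the generated bindings: reading the
# capabilities exchanged with each LDP neighbor through YDK's CRUDService.
# The device address and credentials below are placeholders.
#
#     from ydk.services import CRUDService
#     from ydk.providers import NetconfServiceProvider
#     from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import MplsLdp
#
#     provider = NetconfServiceProvider(address='10.0.0.1',
#                                       username='admin', password='admin')
#     crud = CRUDService()
#     ldp = crud.read(provider, MplsLdp())  # fetch the operational tree
#     for nbr in ldp.mpls_ldp_state.neighbors.neighbor:
#         for cap in nbr.capabilities.received_caps:
#             print(nbr.lsr_id, cap.cap_type, cap.cap_des)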
@property
def _common_path(self):
if self.vrf_name is None:
raise YPYModelError('Key property vrf_name is None')
if self.lsr_id is None:
raise YPYModelError('Key property lsr_id is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:neighbors/Cisco-IOS-XE-mpls-ldp:neighbor[Cisco-IOS-XE-mpls-ldp:vrf-name = ' + str(self.vrf_name) + '][Cisco-IOS-XE-mpls-ldp:lsr-id = ' + str(self.lsr_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.vrf_name is not None:
return True
if self.lsr_id is not None:
return True
if self.advertise_bgp_prefixes is not None:
return True
if self.bgp_advertisement_state is not None:
return True
if self.capabilities is not None and self.capabilities._has_data():
return True
if self.client is not None:
for child in self.client:
if child is not None:
return True
if self.downstream_on_demand is not None:
return True
if self.duplicate_address is not None:
for child in self.duplicate_address:
if child is not None:
return True
if self.graceful_restart_adjacency is not None and self.graceful_restart_adjacency._has_data():
return True
if self.has_sp is not None:
return True
if self.inbound_ipv4 is not None:
return True
if self.inbound_ipv6_filter is not None:
return True
if self.label_space_id is not None:
return True
if self.nbr_bound_address is not None:
for child in self.nbr_bound_address:
if child is not None:
return True
if self.nbr_path_vector_limit is not None:
return True
if self.nbr_stats is not None and self.nbr_stats._has_data():
return True
if self.outbound_ipv4_filter is not None:
return True
if self.outbound_ipv6_filter is not None:
return True
if self.peer_hold_time is not None:
return True
if self.peer_keep_alive_interval is not None:
return True
if self.peer_state is not None:
return True
if self.session_prot_ver is not None:
return True
if self.session_role is not None:
return True
if self.sp_duration is not None:
return True
if self.sp_filter is not None:
return True
if self.sp_has_duration is not None:
return True
if self.sp_state is not None:
return True
if self.spht_remaining is not None:
return True
if self.spht_running is not None:
return True
if self.tcp_information is not None and self.tcp_information._has_data():
return True
if self.up_time_seconds is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.Neighbor']['meta_info']
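# Illustrative sketch: neighbor entries are keyed by (vrf_name, lsr_id), and
# _common_path raises YPYModelError if either key is unset, so always match
# on both keys. Assuming an `ldp` tree read as in the sketch above:
#
#     target = next((n for n in ldp.mpls_ldp_state.neighbors.neighbor
#                    if n.vrf_name == 'default' and n.lsr_id == '10.0.0.2'),
#                   None)
#     if target is not None and target._has_data():
#         print(target.peer_state, target.up_time_seconds)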
class NbrAdjs(object):
"""
For this Neighbor, this is the list of adjacencies
between the neighbor and the local node.
.. attribute:: interface
This is the interface used by MPLS LDP Link Hello
**type**\: str
**refers to**\: :py:class:`name <ydk.models.ietf.ietf_interfaces.Interfaces.Interface>`
.. attribute:: local_address
This is the local address used to send the Targeted Hello
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: target_address
This is the destination address used to send the Targeted Hello
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: target_state
This is the state of this Targeted Hello instance
**type**\: :py:class:`DhcStateEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.DhcStateEnum>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.interface = None
self.local_address = None
self.target_address = None
self.target_state = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:neighbors/Cisco-IOS-XE-mpls-ldp:nbr-adjs'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.interface is not None:
return True
if self.local_address is not None:
return True
if self.target_address is not None:
return True
if self.target_state is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.NbrAdjs']['meta_info']
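# Illustrative sketch: each NbrAdjs entry describes one hello adjacency. Link
# hellos carry an interface name; targeted hellos carry the local/target
# addresses plus a DhcStateEnum state. Assuming an `ldp` tree read as above:
#
#     for adj in ldp.mpls_ldp_state.neighbors.nbr_adjs:
#         if adj.interface is not None:
#             print('link hello on', adj.interface)
#         else:
#             print('targeted hello', adj.local_address, '->',
#                   adj.target_address, adj.target_state)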
class StatsInfo(object):
"""
MPLS LDP Statistics Information
.. attribute:: bad_ldpid
This object counts the number of Bad LDP Identifier Fatal Errors detected by the session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of discon\-time
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_msg_len
This object counts the number of Bad Message Length Fatal Errors detected by the session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of discon\-time
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_pdu_len
This object counts the number of Bad PDU Length Fatal Errors detected by the session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of discon\-time
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_tlv_len
This object counts the number of Bad TLV Length Fatal Errors detected by the session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of discon\-time
**type**\: int
**range:** 0..4294967295
.. attribute:: discon_time
The value of sysUpTime on the most recent occasion at which any one or more of this entity's counters suffered a discontinuity. The relevant counters are the specific instances associated with this entity of any counter32 object contained in the 'EntityStatsTable'. If no such discontinuities have occurred since the last re\-initialization of the local management subsystem, then this object contains a zero value
**type**\: int
**range:** 0..4294967295
.. attribute:: keep_alive_exp
This object counts the number of Session Keep Alive Timer Expired Errors detected by the session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of discon\-time
**type**\: int
**range:** 0..4294967295
.. attribute:: malformed_tlv_val
This object counts the number of Malformed TLV Value Fatal Errors detected by the session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of discon\-time
**type**\: int
**range:** 0..4294967295
.. attribute:: message_in
MPLS LDP message received counters from this neighbor
**type**\: :py:class:`MessageIn <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.StatsInfo.MessageIn>`
.. attribute:: message_out
MPLS LDP message sent counters to this neighbor
**type**\: :py:class:`MessageOut <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.StatsInfo.MessageOut>`
.. attribute:: sess_rej_ad
A count of the Session Rejected/Parameters Advertisement Mode Error Notification Messages sent or received by this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of discon\-time
**type**\: int
**range:** 0..4294967295
.. attribute:: sess_rej_lr
A count of the Session Rejected/Parameters Label Range Notification Messages sent or received by this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of discon\-time
**type**\: int
**range:** 0..4294967295
.. attribute:: sess_rej_max_pdu
A count of the Session Rejected/Parameters Max Pdu Length Error Notification Messages sent or received by this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of discon\-time
**type**\: int
**range:** 0..4294967295
.. attribute:: sess_reject_no_hello
A count of the Session Rejected/No Hello Error Notification Messages sent or received by this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of discon\-time
**type**\: int
**range:** 0..4294967295
.. attribute:: session_attempts
A count of the Session Initialization messages which were sent or received by this LDP Entity and were NAK'd. In other words, this counter counts the number of session initializations that failed. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of discon\-time
**type**\: int
**range:** 0..4294967295
.. attribute:: shutdow_notif_sent
This object counts the number of Shutdown Notifications sent related to session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of discon\-time
**type**\: int
**range:** 0..4294967295
.. attribute:: shutdown_notif_rec
This object counts the number of Shutdown Notifications received related to session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of discon\-time
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.bad_ldpid = None
self.bad_msg_len = None
self.bad_pdu_len = None
self.bad_tlv_len = None
self.discon_time = None
self.keep_alive_exp = None
self.malformed_tlv_val = None
self.message_in = MplsLdp.MplsLdpState.Neighbors.StatsInfo.MessageIn()
self.message_in.parent = self
self.message_out = MplsLdp.MplsLdpState.Neighbors.StatsInfo.MessageOut()
self.message_out.parent = self
self.sess_rej_ad = None
self.sess_rej_lr = None
self.sess_rej_max_pdu = None
self.sess_reject_no_hello = None
self.session_attempts = None
self.shutdow_notif_sent = None
self.shutdown_notif_rec = None
class MessageOut(object):
"""
MPLS LDP message sent counters to this neighbor.
.. attribute:: address_count
Address message count
**type**\: int
**range:** 0..4294967295
.. attribute:: address_withdraw_count
Address withdraw count
**type**\: int
**range:** 0..4294967295
.. attribute:: iccp_rg_app_data_count
ICCP RG App Data count
**type**\: int
**range:** 0..4294967295
.. attribute:: iccp_rg_conn_count
ICCP RG Connect count
**type**\: int
**range:** 0..4294967295
.. attribute:: iccp_rg_disconn_count
ICCP RG Disconnect count
**type**\: int
**range:** 0..4294967295
.. attribute:: iccp_rg_notif_count
ICCP RG Notify count
**type**\: int
**range:** 0..4294967295
.. attribute:: init_count
Init message count
**type**\: int
**range:** 0..4294967295
.. attribute:: keep_alive_count
Keepalive count
**type**\: int
**range:** 0..4294967295
.. attribute:: label_abort_request_count
Label abort request count
**type**\: int
**range:** 0..4294967295
.. attribute:: label_map_count
Label map count
**type**\: int
**range:** 0..4294967295
.. attribute:: label_release_count
Label release count
**type**\: int
**range:** 0..4294967295
.. attribute:: label_request_count
Label request count
**type**\: int
**range:** 0..4294967295
.. attribute:: label_withdraw_count
Label withdraw count
**type**\: int
**range:** 0..4294967295
.. attribute:: notification_count
Notification count
**type**\: int
**range:** 0..4294967295
.. attribute:: total_count
Total count of all messages
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.address_count = None
self.address_withdraw_count = None
self.iccp_rg_app_data_count = None
self.iccp_rg_conn_count = None
self.iccp_rg_disconn_count = None
self.iccp_rg_notif_count = None
self.init_count = None
self.keep_alive_count = None
self.label_abort_request_count = None
self.label_map_count = None
self.label_release_count = None
self.label_request_count = None
self.label_withdraw_count = None
self.notification_count = None
self.total_count = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:neighbors/Cisco-IOS-XE-mpls-ldp:stats-info/Cisco-IOS-XE-mpls-ldp:message-out'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.address_count is not None:
return True
if self.address_withdraw_count is not None:
return True
if self.iccp_rg_app_data_count is not None:
return True
if self.iccp_rg_conn_count is not None:
return True
if self.iccp_rg_disconn_count is not None:
return True
if self.iccp_rg_notif_count is not None:
return True
if self.init_count is not None:
return True
if self.keep_alive_count is not None:
return True
if self.label_abort_request_count is not None:
return True
if self.label_map_count is not None:
return True
if self.label_release_count is not None:
return True
if self.label_request_count is not None:
return True
if self.label_withdraw_count is not None:
return True
if self.notification_count is not None:
return True
if self.total_count is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.StatsInfo.MessageOut']['meta_info']
class MessageIn(object):
"""
MPLS LDP message received counters from this
neighbor.
.. attribute:: address_count
Address message count
**type**\: int
**range:** 0..4294967295
.. attribute:: address_withdraw_count
Address withdraw count
**type**\: int
**range:** 0..4294967295
.. attribute:: iccp_rg_app_data_count
ICCP RG App Data count
**type**\: int
**range:** 0..4294967295
.. attribute:: iccp_rg_conn_count
ICCP RG Connect count
**type**\: int
**range:** 0..4294967295
.. attribute:: iccp_rg_disconn_count
ICCP RG Disconnect count
**type**\: int
**range:** 0..4294967295
.. attribute:: iccp_rg_notif_count
ICCP RG Notify count
**type**\: int
**range:** 0..4294967295
.. attribute:: init_count
Init message count
**type**\: int
**range:** 0..4294967295
.. attribute:: keep_alive_count
Keepalive count
**type**\: int
**range:** 0..4294967295
.. attribute:: label_abort_request_count
Label abort request count
**type**\: int
**range:** 0..4294967295
.. attribute:: label_map_count
Label map count
**type**\: int
**range:** 0..4294967295
.. attribute:: label_release_count
Label release count
**type**\: int
**range:** 0..4294967295
.. attribute:: label_request_count
Label request count
**type**\: int
**range:** 0..4294967295
.. attribute:: label_withdraw_count
Label withdraw count
**type**\: int
**range:** 0..4294967295
.. attribute:: notification_count
Notification count
**type**\: int
**range:** 0..4294967295
.. attribute:: total_count
Total count of all messages
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.address_count = None
self.address_withdraw_count = None
self.iccp_rg_app_data_count = None
self.iccp_rg_conn_count = None
self.iccp_rg_disconn_count = None
self.iccp_rg_notif_count = None
self.init_count = None
self.keep_alive_count = None
self.label_abort_request_count = None
self.label_map_count = None
self.label_release_count = None
self.label_request_count = None
self.label_withdraw_count = None
self.notification_count = None
self.total_count = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:neighbors/Cisco-IOS-XE-mpls-ldp:stats-info/Cisco-IOS-XE-mpls-ldp:message-in'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.address_count is not None:
return True
if self.address_withdraw_count is not None:
return True
if self.iccp_rg_app_data_count is not None:
return True
if self.iccp_rg_conn_count is not None:
return True
if self.iccp_rg_disconn_count is not None:
return True
if self.iccp_rg_notif_count is not None:
return True
if self.init_count is not None:
return True
if self.keep_alive_count is not None:
return True
if self.label_abort_request_count is not None:
return True
if self.label_map_count is not None:
return True
if self.label_release_count is not None:
return True
if self.label_request_count is not None:
return True
if self.label_withdraw_count is not None:
return True
if self.notification_count is not None:
return True
if self.total_count is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.StatsInfo.MessageIn']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:neighbors/Cisco-IOS-XE-mpls-ldp:stats-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.bad_ldpid is not None:
return True
if self.bad_msg_len is not None:
return True
if self.bad_pdu_len is not None:
return True
if self.bad_tlv_len is not None:
return True
if self.discon_time is not None:
return True
if self.keep_alive_exp is not None:
return True
if self.malformed_tlv_val is not None:
return True
if self.message_in is not None and self.message_in._has_data():
return True
if self.message_out is not None and self.message_out._has_data():
return True
if self.sess_rej_ad is not None:
return True
if self.sess_rej_lr is not None:
return True
if self.sess_rej_max_pdu is not None:
return True
if self.sess_reject_no_hello is not None:
return True
if self.session_attempts is not None:
return True
if self.shutdow_notif_sent is not None:
return True
if self.shutdown_notif_rec is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.StatsInfo']['meta_info']
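# Illustrative sketch: StatsInfo pairs per-direction message counters with
# session error counters; discon_time marks the last counter discontinuity.
# Assuming an `ldp` tree read as above:
#
#     stats = ldp.mpls_ldp_state.neighbors.stats_info
#     if stats._has_data():
#         print('msgs in:', stats.message_in.total_count,
#               'msgs out:', stats.message_out.total_count,
#               'bad PDU length errors:', stats.bad_pdu_len)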
class Backoffs(object):
"""
LDP Backoff Information
.. attribute:: backoff_seconds
Current neighbor backoff count in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: waiting_seconds
Current neighbor backoff waiting count in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.backoff_seconds = None
self.waiting_seconds = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:neighbors/Cisco-IOS-XE-mpls-ldp:backoffs'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.backoff_seconds is not None:
return True
if self.waiting_seconds is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.Backoffs']['meta_info']
class NsrNbrDetail(object):
"""
This is the LDP NSR state for this neighbor.
.. attribute:: nbr_sess
This container holds session information about the sessions between these two neighbors
**type**\: :py:class:`NbrSess <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.NsrNbrDetail.NbrSess>`
.. attribute:: nsr_nbr_in_label_reqs_created
In label Request Records created
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_nbr_in_label_reqs_freed
In label Request Records freed
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_nbr_in_label_withdraw_created
In label Withdraw Records created
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_nbr_in_label_withdraw_freed
In label Withdraw Records freed
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_nbr_last_sync_error
This is the last NSR sync error received. It indicates the last reason the sync failed even if the sync has now succeeded. This allows this information to be viewed when the state is flapping, even if the synchronization is successful at the time of the query
**type**\: :py:class:`NsrPeerSyncErrIdentity <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.NsrPeerSyncErrIdentity>`
.. attribute:: nsr_nbr_last_sync_nack_reason
Last NSR sync NACK reason
**type**\: :py:class:`NsrSyncNackRsnIdentity <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.NsrSyncNackRsnIdentity>`
.. attribute:: nsr_nbr_lcl_addr_withdraw_cleared
Local Address Withdraw cleared
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_nbr_lcl_addr_withdraw_set
Local Address Withdraw set
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_nbr_pend_label_req_resps
Pending Label\-Request responses
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_nbr_pend_label_withdraw_resps
Pending Label\-Withdraw responses
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_nbr_pend_lcl_addr_withdraw_acks
Pending Local Address Withdraw Acks
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_nbr_sync_state
NSR Sync State
**type**\: :py:class:`NsrPeerSyncStateIdentity <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.NsrPeerSyncStateIdentity>`
.. attribute:: nsr_nbr_xmit_ctxt_deq
Transmit contexts dequeued
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_nbr_xmit_ctxt_enq
Transmit contexts enqueued
**type**\: int
**range:** 0..4294967295
.. attribute:: nsr_state
Non\-Stop Routing State
**type**\: :py:class:`NsrStatusIdentity <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.NsrStatusIdentity>`
.. attribute:: path_vector_limit
If the value of this object is 0 (zero) then Loop Detection for Path Vectors for this Peer is disabled. Otherwise, if this object has a value greater than zero, then Loop Detection for Path Vectors for this Peer is enabled and the Path Vector Limit is this value
**type**\: int
**range:** 0..255
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.nbr_sess = MplsLdp.MplsLdpState.Neighbors.NsrNbrDetail.NbrSess()
self.nbr_sess.parent = self
self.nsr_nbr_in_label_reqs_created = None
self.nsr_nbr_in_label_reqs_freed = None
self.nsr_nbr_in_label_withdraw_created = None
self.nsr_nbr_in_label_withdraw_freed = None
self.nsr_nbr_last_sync_error = None
self.nsr_nbr_last_sync_nack_reason = None
self.nsr_nbr_lcl_addr_withdraw_cleared = None
self.nsr_nbr_lcl_addr_withdraw_set = None
self.nsr_nbr_pend_label_req_resps = None
self.nsr_nbr_pend_label_withdraw_resps = None
self.nsr_nbr_pend_lcl_addr_withdraw_acks = None
self.nsr_nbr_sync_state = None
self.nsr_nbr_xmit_ctxt_deq = None
self.nsr_nbr_xmit_ctxt_enq = None
self.nsr_state = None
self.path_vector_limit = None
class NbrSess(object):
"""
This container holds session information about the
sessions between these two neighbors.
.. attribute:: discon_time
The value of sysUpTime on the most recent occasion at which any one or more of this session's counters suffered a discontinuity. The relevant counters are the specific instances associated with this session of any counter32 object contained in the session\-stats table. The initial value of this object is the value of sysUpTime when the entry was created in this table. Also, a command generator can distinguish when a session between a given Entity and Peer goes away and a new session is established. This value would change and thus indicate to the command generator that this is a different session
**type**\: int
**range:** 0..4294967295
.. attribute:: keep_alive_remain
The keep alive hold time remaining for this session in seconds
**type**\: int
**range:** 0..4294967295
**units**\: seconds
.. attribute:: keep_alive_time
The negotiated KeepAlive Time, which represents the number of seconds between keep alive messages. The EntityKeepAliveHoldTimer related to this Session is the value that was proposed as the KeepAlive Time for this session. This value is negotiated during session initialization between the entity's proposed value (i.e., the value configured in EntityKeepAliveHoldTimer) and the peer's proposed KeepAlive Hold Timer value. This value is the smaller of the two proposed values
**type**\: int
**range:** 1..65535
**units**\: seconds
.. attribute:: last_stat_change
The value of sysUpTime at the time this Session entered its current state as denoted by the SessionState object
**type**\: int
**range:** 0..4294967295
.. attribute:: max_pdu
The maximum allowable length for LDP PDUs for this session. This value may have been negotiated during the Session Initialization. This object is related to the EntityMaxPduLength object. The EntityMaxPduLength object specifies the requested LDP PDU length, and this object reflects the negotiated LDP PDU length between the Entity and the Peer
**type**\: int
**range:** 1..65535
**units**\: octets
.. attribute:: state
The current state of the session; all of the states 1 to 5 are based on the state machine for session negotiation behavior
**type**\: :py:class:`StateEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.Neighbors.NsrNbrDetail.NbrSess.StateEnum>`
.. attribute:: unknown_mess_err
This object counts the number of Unknown Message Type Errors detected by this LSR/LER during this session. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of discon\-time
**type**\: int
**range:** 0..4294967295
.. attribute:: unknown_tlv
This object counts the number of Unknown TLV Errors detected by this LSR/LER during this session. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of discon\-time
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.discon_time = None
self.keep_alive_remain = None
self.keep_alive_time = None
self.last_stat_change = None
self.max_pdu = None
self.state = None
self.unknown_mess_err = None
self.unknown_tlv = None
class StateEnum(Enum):
"""
StateEnum
The current state of the session; all of the
states 1 to 5 are based on the state machine
for session negotiation behavior.
.. data:: nonexistent = 1
State: nonexistent.
.. data:: initialized = 2
State: initialized.
.. data:: openrec = 3
State: openrec.
.. data:: opensent = 4
State: opensent.
.. data:: operational = 5
State: operational.
"""
nonexistent = 1
initialized = 2
openrec = 3
opensent = 4
operational = 5
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.NsrNbrDetail.NbrSess.StateEnum']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:neighbors/Cisco-IOS-XE-mpls-ldp:nsr-nbr-detail/Cisco-IOS-XE-mpls-ldp:nbr-sess'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.discon_time is not None:
return True
if self.keep_alive_remain is not None:
return True
if self.keep_alive_time is not None:
return True
if self.last_stat_change is not None:
return True
if self.max_pdu is not None:
return True
if self.state is not None:
return True
if self.unknown_mess_err is not None:
return True
if self.unknown_tlv is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.NsrNbrDetail.NbrSess']['meta_info']
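# Illustrative sketch: session state is reported through StateEnum, so code
# can compare against the enum members instead of raw integers. Assuming an
# `ldp` tree read as above:
#
#     sess = ldp.mpls_ldp_state.neighbors.nsr_nbr_detail.nbr_sess
#     StateEnum = MplsLdp.MplsLdpState.Neighbors.NsrNbrDetail.NbrSess.StateEnum
#     if sess.state == StateEnum.operational:
#         print('session operational, keepalive remaining:',
#               sess.keep_alive_remain)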
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:neighbors/Cisco-IOS-XE-mpls-ldp:nsr-nbr-detail'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.nbr_sess is not None and self.nbr_sess._has_data():
return True
if self.nsr_nbr_in_label_reqs_created is not None:
return True
if self.nsr_nbr_in_label_reqs_freed is not None:
return True
if self.nsr_nbr_in_label_withdraw_created is not None:
return True
if self.nsr_nbr_in_label_withdraw_freed is not None:
return True
if self.nsr_nbr_last_sync_error is not None:
return True
if self.nsr_nbr_last_sync_nack_reason is not None:
return True
if self.nsr_nbr_lcl_addr_withdraw_cleared is not None:
return True
if self.nsr_nbr_lcl_addr_withdraw_set is not None:
return True
if self.nsr_nbr_pend_label_req_resps is not None:
return True
if self.nsr_nbr_pend_label_withdraw_resps is not None:
return True
if self.nsr_nbr_pend_lcl_addr_withdraw_acks is not None:
return True
if self.nsr_nbr_sync_state is not None:
return True
if self.nsr_nbr_xmit_ctxt_deq is not None:
return True
if self.nsr_nbr_xmit_ctxt_enq is not None:
return True
if self.nsr_state is not None:
return True
if self.path_vector_limit is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors.NsrNbrDetail']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:neighbors'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.backoffs is not None and self.backoffs._has_data():
return True
if self.nbr_adjs is not None:
for child_ref in self.nbr_adjs:
if child_ref._has_data():
return True
if self.neighbor is not None:
for child_ref in self.neighbor:
if child_ref._has_data():
return True
if self.nsr_nbr_detail is not None and self.nsr_nbr_detail._has_data():
return True
if self.stats_info is not None and self.stats_info._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.Neighbors']['meta_info']
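# Illustrative sketch: _has_data walks a subtree and reports whether any leaf
# or list entry is populated, which is a cheap way to skip empty containers
# after a read. Assuming an `ldp` tree read as above:
#
#     nbrs = ldp.mpls_ldp_state.neighbors
#     if nbrs._has_data():
#         for n in nbrs.neighbor:
#             print(n.vrf_name, n.lsr_id, n.peer_state)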
class LabelRanges(object):
"""
This container holds all the label ranges in use
by this LDP instance.
.. attribute:: label_range
This entry contains a single range of labels represented by the configured Upper and Lower Bounds pairs. NOTE\: there is NO corresponding LDP message which relates to the information in this table; however, this table does provide a way for a user to 'reserve' a generic label range. NOTE\: The ranges for a specific LDP Entity are UNIQUE and non\-overlapping
**type**\: list of :py:class:`LabelRange <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpState.LabelRanges.LabelRange>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.label_range = YList()
self.label_range.parent = self
self.label_range.name = 'label_range'
class LabelRange(object):
"""
This entry contains a single range of labels
represented by the configured Upper and Lower
Bounds pairs. NOTE\: there is NO corresponding
LDP message which relates to the information
in this table; however, this table does provide
a way for a user to 'reserve' a generic label
range.
NOTE\: The ranges for a specific LDP Entity
are UNIQUE and non\-overlapping.
.. attribute:: lr_min <key>
The minimum label configured for this range
**type**\: int
**range:** 0..1048575
.. attribute:: lr_max <key>
The maximum label configured for this range
**type**\: int
**range:** 0..1048575
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.lr_min = None
self.lr_max = None
@property
def _common_path(self):
if self.lr_min is None:
raise YPYModelError('Key property lr_min is None')
if self.lr_max is None:
raise YPYModelError('Key property lr_max is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:label-ranges/Cisco-IOS-XE-mpls-ldp:label-range[Cisco-IOS-XE-mpls-ldp:lr-min = ' + str(self.lr_min) + '][Cisco-IOS-XE-mpls-ldp:lr-max = ' + str(self.lr_max) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.lr_min is not None:
return True
if self.lr_max is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.LabelRanges.LabelRange']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state/Cisco-IOS-XE-mpls-ldp:label-ranges'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.label_range is not None:
for child_ref in self.label_range:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState.LabelRanges']['meta_info']
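# Illustrative sketch: label ranges are keyed by the (lr_min, lr_max) pair
# and are unique and non-overlapping per LDP entity. Assuming an `ldp` tree
# read as above:
#
#     for rng in ldp.mpls_ldp_state.label_ranges.label_range:
#         print('reserved labels %d..%d' % (rng.lr_min, rng.lr_max))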
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.backoff_parameters is not None and self.backoff_parameters._has_data():
return True
if self.bindings is not None and self.bindings._has_data():
return True
if self.bindings_summary is not None and self.bindings_summary._has_data():
return True
if self.capabilities is not None and self.capabilities._has_data():
return True
if self.discovery is not None and self.discovery._has_data():
return True
if self.forwarding is not None and self.forwarding._has_data():
return True
if self.forwarding_summary is not None and self.forwarding_summary._has_data():
return True
if self.graceful_restart is not None and self.graceful_restart._has_data():
return True
if self.icpm_summary_all is not None and self.icpm_summary_all._has_data():
return True
if self.label_ranges is not None and self.label_ranges._has_data():
return True
if self.neighbors is not None and self.neighbors._has_data():
return True
if self.nsr_summary_all is not None and self.nsr_summary_all._has_data():
return True
if self.oper_summary is not None and self.oper_summary._has_data():
return True
if self.parameters is not None and self.parameters._has_data():
return True
if self.vrfs is not None and self.vrfs._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpState']['meta_info']
class MplsLdpConfig(object):
"""
MPLS LDP Configuration.
.. attribute:: discovery
LDP discovery
**type**\: :py:class:`Discovery <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Discovery>`
.. attribute:: dual_stack
This container holds the configuration of dual IPv4 and IPv6 stack peers
**type**\: :py:class:`DualStack <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.DualStack>`
.. attribute:: global_cfg
This container holds all MPLS LDP configuration with global scope. These values affect the entire LSR unless overridden by a parameter with a more localized scope
**type**\: :py:class:`GlobalCfg <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.GlobalCfg>`
.. attribute:: graceful_restart
Configure LDP Graceful Restart
**type**\: :py:class:`GracefulRestart <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.GracefulRestart>`
.. attribute:: interfaces
MPLS LDP Interface configuration commands
**type**\: :py:class:`Interfaces <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Interfaces>`
.. attribute:: label_cfg
This container holds the label allocation and advertisement configuration for the LDP Label Information Base. These control what prefixes may be allocated and advertised to peers
**type**\: :py:class:`LabelCfg <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.LabelCfg>`
.. attribute:: logging
Enable LDP logging
**type**\: :py:class:`Logging <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Logging>`
.. attribute:: nbr_table
This container holds the list of neighbor configuration parameters
**type**\: :py:class:`NbrTable <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.NbrTable>`
.. attribute:: passwords
This holds the MPLS LDP password configuration for use with LDP neighbors
**type**\: :py:class:`Passwords <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Passwords>`
.. attribute:: routing
This container provides the MPLS LDP config for routing protocols from which it can obtain addresses to associate with labels
**type**\: :py:class:`Routing <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Routing>`
.. attribute:: session
Configure session parameters
**type**\: :py:class:`Session <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Session>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.discovery = MplsLdp.MplsLdpConfig.Discovery()
self.discovery.parent = self
self.dual_stack = MplsLdp.MplsLdpConfig.DualStack()
self.dual_stack.parent = self
self.global_cfg = MplsLdp.MplsLdpConfig.GlobalCfg()
self.global_cfg.parent = self
self.graceful_restart = MplsLdp.MplsLdpConfig.GracefulRestart()
self.graceful_restart.parent = self
self.interfaces = MplsLdp.MplsLdpConfig.Interfaces()
self.interfaces.parent = self
self.label_cfg = MplsLdp.MplsLdpConfig.LabelCfg()
self.label_cfg.parent = self
self.logging = MplsLdp.MplsLdpConfig.Logging()
self.logging.parent = self
self.nbr_table = MplsLdp.MplsLdpConfig.NbrTable()
self.nbr_table.parent = self
self.passwords = MplsLdp.MplsLdpConfig.Passwords()
self.passwords.parent = self
self.routing = MplsLdp.MplsLdpConfig.Routing()
self.routing.parent = self
self.session = MplsLdp.MplsLdpConfig.Session()
self.session.parent = self
class GlobalCfg(object):
"""
This container holds all MPLS LDP configuration with global
scope. These values affect the entire LSR unless
overridden by a parameter with a more localized scope.
.. attribute:: admin_status
This leaf controls the administrative status of LDP for this LSR. If set to disable, then all LDP activity will be disabled and all LDP sessions with peers will terminate. The LDP configuration will remain intact. When the admin status is set back to 'enable', then LDP will resume operations and attempt to establish new sessions with the peers
**type**\: :py:class:`AdminStatusEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.GlobalCfg.AdminStatusEnum>`
.. attribute:: dcsp_val
This sets the 6\-bit Differentiated Services Code Point (DSCP) value in the TCP packets for LDP messages being sent from the LSR
**type**\: int
**range:** 0..63
.. attribute:: disable_delay
This choice causes IGP sync to occur immediately upon session up
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: disable_delay_proc
This choice causes IGP sync to occur immediately upon session up
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: disable_quick_start
When set to true, disable LDP discovery's quick start mode for this LSR
**type**\: bool
.. attribute:: enable_nsr
This leaf controls whether Non\-Stop\-Routing should be enabled to include MPLS LDP
**type**\: bool
.. attribute:: high_priority
This sets the priority within the LSR for TCP packets for LDP messages being sent from the LSR. They are given a higher transmission priority and will avoid being queued behind lower priority messages
**type**\: bool
.. attribute:: init_sess_thresh
When attempting to establish a session with a given Peer, the given LDP Entity should send out the YANG notification, 'init\-sess\-thresh\-ex', when the number of Session Initialization messages sent exceeds this threshold. The notification is used to notify an operator when this Entity and its Peer are possibly engaged in an endless sequence of messages as each NAKs the other's Initialization messages with Error Notification messages. Setting this threshold which triggers the notification is one way to notify the operator. The notification should be generated each time this threshold is exceeded and for every subsequent Initialization message which is NAK'd with an Error Notification message after this threshold is exceeded. A value of 0 (zero) for this object indicates that the threshold is infinity, thus the YANG notification will never be generated
**type**\: int
**range:** 0..100
.. attribute:: loop_detection
This leaf enables or disables Loop Detection globally for the LSR
**type**\: bool
.. attribute:: per_af
This container holds the global per address family configuration
**type**\: :py:class:`PerAf <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.GlobalCfg.PerAf>`
.. attribute:: protocol
This leaf defines the protocol to be used. The default is LDP
**type**\: :py:class:`ProtocolEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.GlobalCfg.ProtocolEnum>`
.. attribute:: router_id
Configuration for LDP Router ID (LDP ID)
**type**\: list of :py:class:`RouterId <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.GlobalCfg.RouterId>`
.. attribute:: seconds
Time in seconds to delay IGP sync after session comes up
**type**\: int
**range:** 5..300
**units**\: second
.. attribute:: seconds_delay_proc
Time in seconds to delay IGP sync after session comes up
**type**\: int
**range:** 5..300
**units**\: second
.. attribute:: session
Configure session parameters. Session parameters effect the session between LDP peers once the session has been established
**type**\: :py:class:`Session <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.GlobalCfg.Session>`
.. attribute:: shutdown
Writing this leaf tears down all LDP sessions, withdraws all outgoing labels from the forwarding plane, and frees all local labels that have been allocated
**type**\: :py:class:`Empty<ydk.types.Empty>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.admin_status = None
self.dcsp_val = None
self.disable_delay = None
self.disable_delay_proc = None
self.disable_quick_start = None
self.enable_nsr = None
self.high_priority = None
self.init_sess_thresh = None
self.loop_detection = None
self.per_af = MplsLdp.MplsLdpConfig.GlobalCfg.PerAf()
self.per_af.parent = self
self.protocol = None
self.router_id = YList()
self.router_id.parent = self
self.router_id.name = 'router_id'
self.seconds = None
self.seconds_delay_proc = None
self.session = MplsLdp.MplsLdpConfig.GlobalCfg.Session()
self.session.parent = self
self.shutdown = None
class AdminStatusEnum(Enum):
"""
AdminStatusEnum
This leaf controls the administrative status of LDP for
this LSR. If set to disable, then all LDP activity will
be disabled and all LDP sessions with peers will
terminate. The LDP configuration will remain intact.
When the admin status is set back to 'enable', then
LDP will resume operations and attempt to establish new
sessions with the peers.
.. data:: enable = 1
Enable LDP globally on this LSR.
.. data:: disable = 2
Disable LDP globally on this LSR.
"""
enable = 1
disable = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.GlobalCfg.AdminStatusEnum']
class ProtocolEnum(Enum):
"""
ProtocolEnum
This leaf defines the protocol to be used. The default
is LDP.
.. data:: ldp = 1
This LSR should use the LDP tagging protocol.
.. data:: tdp = 2
This LSR should use the TDP tagging protocol.
.. data:: both = 3
This LSR should use both the LDP and TDP tagging
protocols.
"""
ldp = 1
tdp = 2
both = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.GlobalCfg.ProtocolEnum']
class RouterId(object):
"""
Configuration for LDP Router ID (LDP ID)
.. attribute:: vrf_name <key>
This contains the VRF Name, where 'default' is used for the default vrf
**type**\: str
.. attribute:: force
Force the router to use the specified identifier as the router ID more quickly
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: lsr_id_if
This defines the interface to use for the LDP LSR identifier address for all sessions. The IP address of this interface will be used as the identifier
**type**\: str
**refers to**\: :py:class:`name <ydk.models.ietf.ietf_interfaces.Interfaces.Interface>`
.. attribute:: lsr_id_ip
This is the IP address to be used as the LDP LSR ID for all sessions
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.vrf_name = None
self.force = None
self.lsr_id_if = None
self.lsr_id_ip = None
@property
def _common_path(self):
if self.vrf_name is None:
raise YPYModelError('Key property vrf_name is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:global-cfg/Cisco-IOS-XE-mpls-ldp:router-id[Cisco-IOS-XE-mpls-ldp:vrf-name = ' + str(self.vrf_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.vrf_name is not None:
return True
if self.force is not None:
return True
if self.lsr_id_if is not None:
return True
if self.lsr_id_ip is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.GlobalCfg.RouterId']['meta_info']
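# Illustrative sketch, hedged: RouterId is config data (is_config() returns
# True), so in principle it can be pushed with CRUDService.create, reusing
# the provider/crud from the first sketch. Whether a given IOS XE release
# accepts writes through this model is device dependent; 'Loopback0' is a
# placeholder interface name.
#
#     cfg = MplsLdp()
#     rid = MplsLdp.MplsLdpConfig.GlobalCfg.RouterId()
#     rid.vrf_name = 'default'
#     rid.lsr_id_if = 'Loopback0'
#     cfg.mpls_ldp_config.global_cfg.router_id.append(rid)
#     crud.create(provider, cfg)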
class Session(object):
"""
Configure session parameters. Session parameters affect
the session between LDP peers once the session has been
established.
.. attribute:: backoff_init
Initial session backoff time (seconds). The LDP backoff mechanism prevents two incompatibly configured label switch routers (LSRs) from engaging in an unthrottled sequence of session setup failures. For example, an incompatibility arises when two neighboring routers attempt to perform LC\-ATM (label\-controlled ATM) but the two are using different ranges of VPI/VCI values for labels. If a session setup attempt fails due to an incompatibility, each LSR delays its next attempt (that is, backs off), increasing the delay exponentially with each successive failure until the maximum backoff delay is reached. The default settings correspond to the lowest settings for initial and maximum backoff values defined by the LDP protocol specification. You should change the settings from the default values only if such settings result in undesirable behavior
**type**\: int
**range:** 0..4294967295
**units**\: seconds
**default value**\: 15
.. attribute:: backoff_max
The maximum session backoff time (seconds). The LDP backoff mechanism prevents two incompatibly configured label switch routers (LSRs) from engaging in an unthrottled sequence of session setup failures. For example, an incompatibility arises when two neighboring routers attempt to perform LC\-ATM (label\-controlled ATM) but the two are using different ranges of VPI/VCI values for labels. If a session setup attempt fails due to an incompatibility, each LSR delays its next attempt (that is, backs off), increasing the delay exponentially with each successive failure until the maximum backoff delay is reached. The default settings correspond to the lowest settings for initial and maximum backoff values defined by the LDP protocol specification. You should change the settings from the default values only if such settings result in undesirable behavior
**type**\: int
**range:** 0..4294967295
**units**\: seconds
**default value**\: 15
.. attribute:: downstream_on_demand
This container holds config for Downstream on Demand. For it to be enabled, the Downstream on Demand feature has to be configured on both peers of the session. If only one peer in the session has the downstream\-on\-demand feature configured, then the session does not use downstream\-on\-demand mode. If, after a label request is sent, no remote label is received from the peer, the router will periodically resend the label request. Once the peer advertises a label in response to the label request, it will automatically readvertise the label if any label attribute changes subsequently
**type**\: list of :py:class:`DownstreamOnDemand <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.GlobalCfg.Session.DownstreamOnDemand>`
.. attribute:: infinite
If set to true, the session is held indefinitely in the absence of LDP messages from the peer
**type**\: bool
.. attribute:: protection
Configure Session Protection parameters
**type**\: :py:class:`Protection <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.GlobalCfg.Session.Protection>`
.. attribute:: seconds
Number from 15 to 2147483 that defines the time, in seconds, an LDP session is maintained in the absence of LDP messages from the session peer
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.backoff_init = None
self.backoff_max = None
self.downstream_on_demand = YList()
self.downstream_on_demand.parent = self
self.downstream_on_demand.name = 'downstream_on_demand'
self.infinite = None
self.protection = MplsLdp.MplsLdpConfig.GlobalCfg.Session.Protection()
self.protection.parent = self
self.seconds = None
class DownstreamOnDemand(object):
"""
This container holds config for Downstream on Demand.
For it to be enabled, the Downstream on demand
feature has to be configured on both peers of the
session. If only one peer in the session has
downstream\-on\-demand feature configured, then the
session does not use downstream\-on\-demand mode.
If, after a label request is sent, no remote
label is received from the peer, the router will
periodically resend the label request. Once the
peer advertises a label in response to the label
request, it will automatically readvertise the label
if any label attribute changes subsequently.
.. attribute:: vrf_name <key>
This contains the VRF Name, where 'default' is used for the default vrf
**type**\: str
.. attribute:: enabled
Enable Downstream on Demand for this LSR. In this mode a label is not advertised to a peer unless the peer explicitly requests it. At the same time, since the peer does not automatically advertise labels, the label request is sent whenever the next\-hop points to a peer for which no remote label has been assigned
**type**\: bool
.. attribute:: filter
This filter contains a list of peer IDs that are configured for downstream\-on\-demand mode. When the filter is changed or configured, the list of established neighbors is traversed. If a session's downstream\-on\-demand configuration has changed, the session is reset in order that the new down\-stream\-on\-demand mode can be configured. The reason for resetting the session is to ensure that the labels are properly advertised between the peers. When a new session is established, the ACL is verified to determine whether the session should negotiate for downstream\-on\-demand mode. If the filter string is configured and the corresponding filter does not exist or is empty, then downstream\-on\-demand mode is not configured for any neighbor. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.vrf_name = None
self.enabled = None
self.filter = None
@property
def _common_path(self):
if self.vrf_name is None:
raise YPYModelError('Key property vrf_name is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:global-cfg/Cisco-IOS-XE-mpls-ldp:session/Cisco-IOS-XE-mpls-ldp:downstream-on-demand[Cisco-IOS-XE-mpls-ldp:vrf-name = ' + str(self.vrf_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.vrf_name is not None:
return True
if self.enabled is not None:
return True
if self.filter is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.GlobalCfg.Session.DownstreamOnDemand']['meta_info']
class Protection(object):
"""
Configure Session Protection parameters
.. attribute:: enable_prot
This is set true to enable session protection
**type**\: bool
.. attribute:: inf
This session holdup duration is infinite
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: peer_filter
This is an optional filter to restrict session protection. If the string is null or unconfigured then session protection applied to all peers. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
.. attribute:: seconds
This is the session holdup duration in seconds
**type**\: int
**range:** 30..2147483
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.enable_prot = None
self.inf = None
self.peer_filter = None
self.seconds = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:global-cfg/Cisco-IOS-XE-mpls-ldp:session/Cisco-IOS-XE-mpls-ldp:protection'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.enable_prot is not None:
return True
if self.inf is not None:
return True
if self.peer_filter is not None:
return True
if self.seconds is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.GlobalCfg.Session.Protection']['meta_info']
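# Usage sketch (illustrative values): enabling session protection with a
# finite holdup duration; setting 'inf' instead would mark the holdup as
# infinite. The filter name is hypothetical.
#
#   prot = MplsLdp.MplsLdpConfig.GlobalCfg.Session.Protection()
#   prot.enable_prot = True
#   prot.seconds = 86400            # within the documented 30..2147483 range
#   prot.peer_filter = 'PROT-PEERS' # hypothetical device-specific filter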
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:global-cfg/Cisco-IOS-XE-mpls-ldp:session'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.backoff_init is not None:
return True
if self.backoff_max is not None:
return True
if self.downstream_on_demand is not None:
for child_ref in self.downstream_on_demand:
if child_ref._has_data():
return True
if self.infinite is not None:
return True
if self.protection is not None and self.protection._has_data():
return True
if self.seconds is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.GlobalCfg.Session']['meta_info']
class PerAf(object):
"""
This container holds the global per address family
configuration.
.. attribute:: af_cfg
This container holds the global per address family configuration
**type**\: list of :py:class:`AfCfg <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.GlobalCfg.PerAf.AfCfg>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.af_cfg = YList()
self.af_cfg.parent = self
self.af_cfg.name = 'af_cfg'
class AfCfg(object):
"""
This container holds the global per address family
configuration.
.. attribute:: vrf_name <key>
This contains the VRF Name, where 'default' is used for the default vrf
**type**\: str
.. attribute:: af_name <key>
Address Family name
**type**\: :py:class:`AfEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.AfEnum>`
.. attribute:: default_route
When set true, this enables MPLS forwarding for the ip default route
**type**\: bool
.. attribute:: implicit
Do not advertise an explicit address in LDP discovery hello messages or advertise a default address. Use the default address for LDP transport
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: interface
Advertise this interface's address as the explicit address in LDP discovery hello messages and use it for LDP transport
**type**\: str
**refers to**\: :py:class:`name <ydk.models.ietf.ietf_interfaces.Interfaces.Interface>`
.. attribute:: ipaddr
Advertise this address as the explicit address in LDP discovery hello messages and use it for LDP transport
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.vrf_name = None
self.af_name = None
self.default_route = None
self.implicit = None
self.interface = None
self.ipaddr = None
@property
def _common_path(self):
if self.vrf_name is None:
raise YPYModelError('Key property vrf_name is None')
if self.af_name is None:
raise YPYModelError('Key property af_name is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:global-cfg/Cisco-IOS-XE-mpls-ldp:per-af/Cisco-IOS-XE-mpls-ldp:af-cfg[Cisco-IOS-XE-mpls-ldp:vrf-name = ' + str(self.vrf_name) + '][Cisco-IOS-XE-mpls-ldp:af-name = ' + str(self.af_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.vrf_name is not None:
return True
if self.af_name is not None:
return True
if self.default_route is not None:
return True
if self.implicit is not None:
return True
if self.interface is not None:
return True
if self.ipaddr is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.GlobalCfg.PerAf.AfCfg']['meta_info']
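# Usage sketch (illustrative values): a per-address-family entry that
# advertises a specific IPv4 transport address. The AfEnum member name is a
# placeholder here, since AfEnum's members are defined elsewhere in this
# module.
#
#   af_cfg = MplsLdp.MplsLdpConfig.GlobalCfg.PerAf.AfCfg()
#   af_cfg.vrf_name = 'default'
#   af_cfg.af_name = AfEnum.<ipv4 member>   # placeholder; see AfEnum
#   af_cfg.ipaddr = '192.0.2.1'
#   per_af.af_cfg.append(af_cfg)            # 'per_af' is a PerAf instance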
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:global-cfg/Cisco-IOS-XE-mpls-ldp:per-af'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.af_cfg is not None:
for child_ref in self.af_cfg:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.GlobalCfg.PerAf']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:global-cfg'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.admin_status is not None:
return True
if self.dcsp_val is not None:
return True
if self.disable_delay is not None:
return True
if self.disable_delay_proc is not None:
return True
if self.disable_quick_start is not None:
return True
if self.enable_nsr is not None:
return True
if self.high_priority is not None:
return True
if self.init_sess_thresh is not None:
return True
if self.loop_detection is not None:
return True
if self.per_af is not None and self.per_af._has_data():
return True
if self.protocol is not None:
return True
if self.router_id is not None:
for child_ref in self.router_id:
if child_ref._has_data():
return True
if self.seconds is not None:
return True
if self.seconds_delay_proc is not None:
return True
if self.session is not None and self.session._has_data():
return True
if self.shutdown is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.GlobalCfg']['meta_info']
class NbrTable(object):
"""
This container holds the list of neighbor configuration
parameters.
.. attribute:: nbr_cfg
This entry holds the configuration of a single neighbor identified by the IP address of that neighbor
**type**\: list of :py:class:`NbrCfg <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.NbrTable.NbrCfg>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.nbr_cfg = YList()
self.nbr_cfg.parent = self
self.nbr_cfg.name = 'nbr_cfg'
class NbrCfg(object):
"""
This entry holds the configuration of a single neighbor
identified by the IP address of that neighbor.
.. attribute:: nbr_vrf <key>
This contains the VRF Name, where 'default' is used for the default vrf
**type**\: str
.. attribute:: nbr_ip <key>
The IP address for the LDP neighbor. This may be IPv4 or IPv6
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: admin_status
The administrative status of this neighbor. If this object is changed from 'enable' to 'disable' and this entity has already attempted to establish contact with a neighbor, a 'tear\-down' for that session is issued and the session and all information related to that session cease to exist. When the admin status is set back to 'enable', then this entity will attempt to establish a new session with the neighbor
**type**\: :py:class:`AdminStatusEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.NbrTable.NbrCfg.AdminStatusEnum>`
.. attribute:: implicit_withdraw
Enable LDP implicit withdraw label for this peer
**type**\: bool
.. attribute:: label_binding_filter
Accept only labels matching this filter. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
.. attribute:: label_protocol
This leaf defines the protocol to be used. The default is LDP
**type**\: :py:class:`LabelProtocolEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.NbrTable.NbrCfg.LabelProtocolEnum>`
.. attribute:: password
Enables password authentication and stores the password using a cryptographic hash
**type**\: str
.. attribute:: targeted
Establish or delete a targeted session
**type**\: bool
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.nbr_vrf = None
self.nbr_ip = None
self.admin_status = None
self.implicit_withdraw = None
self.label_binding_filter = None
self.label_protocol = None
self.password = None
self.targeted = None
class AdminStatusEnum(Enum):
"""
AdminStatusEnum
The administrative status of this neighbor.
If this object is changed from 'enable' to 'disable'
and this entity has already attempted to establish
contact with a neighbor, a 'tear\-down' for that session
is issued and the session and all information related
to that session cease to exist.
When the admin status is set back to 'enable', then
this entity will attempt to establish a new session
with the neighbor.
.. data:: enable = 1
Set the administrative status of this neighbor
to enabled.
.. data:: disable = 2
Set the administrative status of this neighbor
to disabled.
"""
enable = 1
disable = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.NbrTable.NbrCfg.AdminStatusEnum']
class LabelProtocolEnum(Enum):
"""
LabelProtocolEnum
This leaf defines the protocol to be used. The default
is LDP.
.. data:: ldp = 1
This LSR should use the LDP tagging protocol.
.. data:: tdp = 2
This LSR should use the TDP tagging protocol.
"""
ldp = 1
tdp = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.NbrTable.NbrCfg.LabelProtocolEnum']
@property
def _common_path(self):
if self.nbr_vrf is None:
raise YPYModelError('Key property nbr_vrf is None')
if self.nbr_ip is None:
raise YPYModelError('Key property nbr_ip is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:nbr-table/Cisco-IOS-XE-mpls-ldp:nbr-cfg[Cisco-IOS-XE-mpls-ldp:nbr-vrf = ' + str(self.nbr_vrf) + '][Cisco-IOS-XE-mpls-ldp:nbr-ip = ' + str(self.nbr_ip) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.nbr_vrf is not None:
return True
if self.nbr_ip is not None:
return True
if self.admin_status is not None:
return True
if self.implicit_withdraw is not None:
return True
if self.label_binding_filter is not None:
return True
if self.label_protocol is not None:
return True
if self.password is not None:
return True
if self.targeted is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.NbrTable.NbrCfg']['meta_info']
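# Usage sketch (illustrative address): enabling a targeted LDP session to a
# neighbor using only the attributes and enums defined above.
#
#   nbr = MplsLdp.MplsLdpConfig.NbrTable.NbrCfg()
#   nbr.nbr_vrf = 'default'
#   nbr.nbr_ip = '192.0.2.1'
#   nbr.admin_status = MplsLdp.MplsLdpConfig.NbrTable.NbrCfg.AdminStatusEnum.enable
#   nbr.label_protocol = MplsLdp.MplsLdpConfig.NbrTable.NbrCfg.LabelProtocolEnum.ldp
#   nbr.targeted = True
#   nbr_table.nbr_cfg.append(nbr)  # 'nbr_table' is a NbrTable instance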
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:nbr-table'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.nbr_cfg is not None:
for child_ref in self.nbr_cfg:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.NbrTable']['meta_info']
class Passwords(object):
"""
This holds the MPLS LDP password configuration for use
with LDP neighbors.
.. attribute:: password
This holds the MPLS LDP password configuration for use with a single LDP neighbor or group of LDP neighbors
**type**\: list of :py:class:`Password <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Passwords.Password>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.password = YList()
self.password.parent = self
self.password.name = 'password'
class Password(object):
"""
This holds the MPLS LDP password configuration for use
with a single LDP neighbor or group of LDP neighbors.
.. attribute:: nbr_vrf <key>
This contains the VRF Name, where 'default' is used for the default vrf
**type**\: str
.. attribute:: nbr_id <key>
This leaf holds the neighbor id for this password. This id may be an lsr\-id, an ip\-address, or a filter describing a group of neighbors
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
----
**type**\: str
----
.. attribute:: password_num <key>
This is a user\-assigned unique number identifying a password for this neighbor or group of neighbors. Multiple passwords may be assigned to a neighbor. If that is the case, each password is tried, starting from the lowest number and proceeding to the highest, until a password matches or the list is exhausted
**type**\: int
**range:** 0..4294967295
.. attribute:: clear_pass
This is a clear\-text (non\-encrypted) password to be used with the neighbor
**type**\: str
.. attribute:: encrypt_pass
This is an encrypted password to be used with the neighbor
**type**\: str
.. attribute:: keychain_pass
This is a keychain identifier, which identifies a separately configured keychain to be used with the neighbor
**type**\: str
.. attribute:: pass_required
This leaf is set true if the password is required and false if the password is not required
**type**\: bool
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.nbr_vrf = None
self.nbr_id = None
self.password_num = None
self.clear_pass = None
self.encrypt_pass = None
self.keychain_pass = None
self.pass_required = None
@property
def _common_path(self):
if self.nbr_vrf is None:
raise YPYModelError('Key property nbr_vrf is None')
if self.nbr_id is None:
raise YPYModelError('Key property nbr_id is None')
if self.password_num is None:
raise YPYModelError('Key property password_num is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:passwords/Cisco-IOS-XE-mpls-ldp:password[Cisco-IOS-XE-mpls-ldp:nbr-vrf = ' + str(self.nbr_vrf) + '][Cisco-IOS-XE-mpls-ldp:nbr-id = ' + str(self.nbr_id) + '][Cisco-IOS-XE-mpls-ldp:password-num = ' + str(self.password_num) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.nbr_vrf is not None:
return True
if self.nbr_id is not None:
return True
if self.password_num is not None:
return True
if self.clear_pass is not None:
return True
if self.encrypt_pass is not None:
return True
if self.keychain_pass is not None:
return True
if self.pass_required is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Passwords.Password']['meta_info']
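# Usage sketch (illustrative values): a clear-text password entry for one
# neighbor. When several passwords exist for a neighbor, lower password-num
# values are tried first.
#
#   pw = MplsLdp.MplsLdpConfig.Passwords.Password()
#   pw.nbr_vrf = 'default'
#   pw.nbr_id = '192.0.2.1'
#   pw.password_num = 1
#   pw.clear_pass = 'example-secret'  # hypothetical; never hard-code real secrets
#   pw.pass_required = True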
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:passwords'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.password is not None:
for child_ref in self.password:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Passwords']['meta_info']
class Session(object):
"""
Configure session parameters
.. attribute:: backoff
Initial session backoff time (seconds)
**type**\: int
**range:** 0..4294967295
**default value**\: 15
.. attribute:: infinite
Ignore LDP session holdtime
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: seconds
Session holdtime in seconds
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.backoff = None
self.infinite = None
self.seconds = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:session'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.backoff is not None:
return True
if self.infinite is not None:
return True
if self.seconds is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Session']['meta_info']
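# Usage sketch (illustrative values): tuning session parameters. 'infinite'
# and 'seconds' are alternative ways of expressing the holdtime.
#
#   sess = MplsLdp.MplsLdpConfig.Session()
#   sess.backoff = 30    # initial backoff in seconds (default value is 15)
#   sess.seconds = 180   # session holdtime in seconds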
class LabelCfg(object):
"""
This container holds the label allocation and
advertisement configuration for the LDP Label Information
Base. These control what prefixes may be allocated and
advertised to peers.
.. attribute:: label_af_cfg
This is an allocation filter and advertisement filters for LDP labels in this address family
**type**\: list of :py:class:`LabelAfCfg <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.LabelCfg.LabelAfCfg>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.label_af_cfg = YList()
self.label_af_cfg.parent = self
self.label_af_cfg.name = 'label_af_cfg'
class LabelAfCfg(object):
"""
This is an allocation filter and advertisement filters
for LDP labels in this address family.
.. attribute:: vrf_name <key>
This contains the VRF Name, where 'default' is used for the default vrf
**type**\: str
.. attribute:: af_name <key>
Address Family name
**type**\: :py:class:`AfEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.AfEnum>`
.. attribute:: advt_filter
MPLS LDP Label advertisement filter restrictions
**type**\: list of :py:class:`AdvtFilter <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.LabelCfg.LabelAfCfg.AdvtFilter>`
.. attribute:: host_route_enable
True if this LSR should allocate host\-routes only
**type**\: bool
.. attribute:: prefix_filter
This contains the filter name for this label's prefix. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
**length:** 0..64
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.vrf_name = None
self.af_name = None
self.advt_filter = YList()
self.advt_filter.parent = self
self.advt_filter.name = 'advt_filter'
self.host_route_enable = None
self.prefix_filter = None
class AdvtFilter(object):
"""
MPLS LDP Label advertisement filter restrictions.
.. attribute:: prefix_filter <key>
This contains the filter name for this label's prefix. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
**length:** 0..64
.. attribute:: peer_filter <key>
This contains the filter name for this label's Peer. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
**length:** 0..64
.. attribute:: interface <key>
This is an optional interface that may be used to restrict the scope of the label advertisement
**type**\: str
**refers to**\: :py:class:`name <ydk.models.ietf.ietf_interfaces.Interfaces.Interface>`
.. attribute:: adv_label_cfg
This leaf controls what type of label is advertised for matching prefixes to the matching peers
**type**\: :py:class:`AdvLabelTypeEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.AdvLabelTypeEnum>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.prefix_filter = None
self.peer_filter = None
self.interface = None
self.adv_label_cfg = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.prefix_filter is None:
raise YPYModelError('Key property prefix_filter is None')
if self.peer_filter is None:
raise YPYModelError('Key property peer_filter is None')
if self.interface is None:
raise YPYModelError('Key property interface is None')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:advt-filter[Cisco-IOS-XE-mpls-ldp:prefix-filter = ' + str(self.prefix_filter) + '][Cisco-IOS-XE-mpls-ldp:peer-filter = ' + str(self.peer_filter) + '][Cisco-IOS-XE-mpls-ldp:interface = ' + str(self.interface) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.prefix_filter is not None:
return True
if self.peer_filter is not None:
return True
if self.interface is not None:
return True
if self.adv_label_cfg is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.LabelCfg.LabelAfCfg.AdvtFilter']['meta_info']
@property
def _common_path(self):
if self.vrf_name is None:
raise YPYModelError('Key property vrf_name is None')
if self.af_name is None:
raise YPYModelError('Key property af_name is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:label-cfg/Cisco-IOS-XE-mpls-ldp:label-af-cfg[Cisco-IOS-XE-mpls-ldp:vrf-name = ' + str(self.vrf_name) + '][Cisco-IOS-XE-mpls-ldp:af-name = ' + str(self.af_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.vrf_name is not None:
return True
if self.af_name is not None:
return True
if self.advt_filter is not None:
for child_ref in self.advt_filter:
if child_ref._has_data():
return True
if self.host_route_enable is not None:
return True
if self.prefix_filter is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.LabelCfg.LabelAfCfg']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:label-cfg'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.label_af_cfg is not None:
for child_ref in self.label_af_cfg:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.LabelCfg']['meta_info']
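# Usage sketch (illustrative values): restricting label advertisement for
# one address family with an advertisement filter entry. The filter names
# and interface are hypothetical, and the AfEnum member is a placeholder.
#
#   af = MplsLdp.MplsLdpConfig.LabelCfg.LabelAfCfg()
#   af.vrf_name = 'default'
#   af.af_name = AfEnum.<ipv4 member>  # placeholder; see AfEnum
#   af.host_route_enable = True
#   flt = MplsLdp.MplsLdpConfig.LabelCfg.LabelAfCfg.AdvtFilter()
#   flt.prefix_filter = 'PFX-10'       # hypothetical device-specific filters
#   flt.peer_filter = 'PEER-20'
#   flt.interface = 'GigabitEthernet0/0/0'
#   af.advt_filter.append(flt)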
class Discovery(object):
"""
LDP discovery
.. attribute:: instance_tlv
Set this leaf to true to disable transmit and receive processing for Type\-Length\-Value (TLV) in the discovery messages
**type**\: bool
.. attribute:: int_trans_addrs
This list contains the per\-interface transport addresses, which override the global and default values
**type**\: :py:class:`IntTransAddrs <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Discovery.IntTransAddrs>`
.. attribute:: link_hello
This container holds the parameters for the non\-targeted link hello
**type**\: :py:class:`LinkHello <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Discovery.LinkHello>`
.. attribute:: targeted_hello
This container holds the parameters for the targeted link hello
**type**\: :py:class:`TargetedHello <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Discovery.TargetedHello>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.instance_tlv = None
self.int_trans_addrs = MplsLdp.MplsLdpConfig.Discovery.IntTransAddrs()
self.int_trans_addrs.parent = self
self.link_hello = MplsLdp.MplsLdpConfig.Discovery.LinkHello()
self.link_hello.parent = self
self.targeted_hello = MplsLdp.MplsLdpConfig.Discovery.TargetedHello()
self.targeted_hello.parent = self
class LinkHello(object):
"""
This container holds the parameters for the non\-targeted
link hello.
.. attribute:: holdtime
LDP discovery link hello holdtime in seconds
**type**\: int
**range:** 0..65535
.. attribute:: interval
LDP discovery link hello interval in seconds
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.holdtime = None
self.interval = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:discovery/Cisco-IOS-XE-mpls-ldp:link-hello'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.holdtime is not None:
return True
if self.interval is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Discovery.LinkHello']['meta_info']
class TargetedHello(object):
"""
This container holds the parameters for the targeted
link hello.
.. attribute:: accept
Enables router to respond to requests for targeted hello messages
**type**\: :py:class:`Accept <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Discovery.TargetedHello.Accept>`
.. attribute:: enable
Set to true if targeted hello messages may be accepted
**type**\: bool
.. attribute:: holdtime
LDP discovery targeted hello holdtime in seconds
**type**\: int
**range:** 0..65535
.. attribute:: interval
LDP discovery targeted hello interval in seconds
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.accept = MplsLdp.MplsLdpConfig.Discovery.TargetedHello.Accept()
self.accept.parent = self
self.enable = None
self.holdtime = None
self.interval = None
class Accept(object):
"""
Enables router to respond to requests for targeted
hello messages
.. attribute:: enable
Set to true if targeted hello messages may be accepted
**type**\: bool
.. attribute:: src_filter
Only respond to requests for targeted hello messages from sources matching this filter. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.enable = None
self.src_filter = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:discovery/Cisco-IOS-XE-mpls-ldp:targeted-hello/Cisco-IOS-XE-mpls-ldp:accept'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.enable is not None:
return True
if self.src_filter is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Discovery.TargetedHello.Accept']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:discovery/Cisco-IOS-XE-mpls-ldp:targeted-hello'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.accept is not None and self.accept._has_data():
return True
if self.enable is not None:
return True
if self.holdtime is not None:
return True
if self.interval is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Discovery.TargetedHello']['meta_info']
class IntTransAddrs(object):
"""
This list contains the per\-interface transport
addresses, which override the global and default
values.
.. attribute:: int_trans_addr
This entry contains the per\-interface transport addresses, which override the global and default values
**type**\: list of :py:class:`IntTransAddr <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Discovery.IntTransAddrs.IntTransAddr>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.int_trans_addr = YList()
self.int_trans_addr.parent = self
self.int_trans_addr.name = 'int_trans_addr'
class IntTransAddr(object):
"""
This entry contains the per\-interface transport
addresses, which override the global and default
values.
.. attribute:: af_name <key>
Address Family name
**type**\: :py:class:`AfEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.AfEnum>`
.. attribute:: int_name <key>
The Interface Name
**type**\: str
**refers to**\: :py:class:`name <ydk.models.ietf.ietf_interfaces.Interfaces.Interface>`
.. attribute:: trans_int
Advertise this interface's address as the address in LDP discovery hello messages and use it for LDP transport
**type**\: str
**refers to**\: :py:class:`name <ydk.models.ietf.ietf_interfaces.Interfaces.Interface>`
.. attribute:: trans_ip
Advertise this address as the address in LDP discovery hello messages and use it for LDP transport
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.af_name = None
self.int_name = None
self.trans_int = None
self.trans_ip = None
@property
def _common_path(self):
if self.af_name is None:
raise YPYModelError('Key property af_name is None')
if self.int_name is None:
raise YPYModelError('Key property int_name is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:discovery/Cisco-IOS-XE-mpls-ldp:int-trans-addrs/Cisco-IOS-XE-mpls-ldp:int-trans-addr[Cisco-IOS-XE-mpls-ldp:af-name = ' + str(self.af_name) + '][Cisco-IOS-XE-mpls-ldp:int-name = ' + str(self.int_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.af_name is not None:
return True
if self.int_name is not None:
return True
if self.trans_int is not None:
return True
if self.trans_ip is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Discovery.IntTransAddrs.IntTransAddr']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:discovery/Cisco-IOS-XE-mpls-ldp:int-trans-addrs'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.int_trans_addr is not None:
for child_ref in self.int_trans_addr:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Discovery.IntTransAddrs']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:discovery'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.instance_tlv is not None:
return True
if self.int_trans_addrs is not None and self.int_trans_addrs._has_data():
return True
if self.link_hello is not None and self.link_hello._has_data():
return True
if self.targeted_hello is not None and self.targeted_hello._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Discovery']['meta_info']
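# Usage sketch (illustrative values): discovery hello timers plus targeted
# hello acceptance. The Discovery __init__ above already instantiates the
# link_hello, targeted_hello, and int_trans_addrs children, so they can be
# assigned directly. The source filter name is hypothetical.
#
#   disc = MplsLdp.MplsLdpConfig.Discovery()
#   disc.link_hello.holdtime = 15
#   disc.link_hello.interval = 5
#   disc.targeted_hello.enable = True
#   disc.targeted_hello.accept.enable = True
#   disc.targeted_hello.accept.src_filter = 'THELLO-SRC'  # hypothetical filter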
class GracefulRestart(object):
"""
Configure LDP Graceful Restart
.. attribute:: forwarding_holding
Specifies the amount of time the MPLS LDP forwarding state must be preserved after the control plane restarts
**type**\: int
**range:** 5..300
**units**\: seconds
.. attribute:: helper
This contains the filter name for peers for which this LSR will act as a graceful\-restart helper
**type**\: list of :py:class:`Helper <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.GracefulRestart.Helper>`
.. attribute:: is_graceful_restartable
This is set true to enable graceful restart
**type**\: bool
.. attribute:: max_recovery
Amount of time (in seconds) that the router should hold stale label\-FEC bindings after an LDP session has been reestablished
**type**\: int
**range:** 5..300
**units**\: seconds
.. attribute:: nbr_liveness
Amount of time (in seconds) that the router must wait for an LDP session to be reestablished
**type**\: int
**range:** 5..300
**units**\: seconds
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.forwarding_holding = None
self.helper = YList()
self.helper.parent = self
self.helper.name = 'helper'
self.is_graceful_restartable = None
self.max_recovery = None
self.nbr_liveness = None
class Helper(object):
"""
This contains the filter name for peers for which this
LSR will act as a graceful\-restart helper.
.. attribute:: helper_vrf <key>
This contains the VRF Name, where 'default' is used for the default vrf
**type**\: str
.. attribute:: helper_filter <key>
This contains the filter name for peers for which this LSR will act as a graceful\-restart helper. The filter type is device specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
**length:** 0..64
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.helper_vrf = None
self.helper_filter = None
@property
def _common_path(self):
if self.helper_vrf is None:
raise YPYModelError('Key property helper_vrf is None')
if self.helper_filter is None:
raise YPYModelError('Key property helper_filter is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:graceful-restart/Cisco-IOS-XE-mpls-ldp:helper[Cisco-IOS-XE-mpls-ldp:helper-vrf = ' + str(self.helper_vrf) + '][Cisco-IOS-XE-mpls-ldp:helper-filter = ' + str(self.helper_filter) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.helper_vrf is not None:
return True
if self.helper_filter is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.GracefulRestart.Helper']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:graceful-restart'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.forwarding_holding is not None:
return True
if self.helper is not None:
for child_ref in self.helper:
if child_ref._has_data():
return True
if self.is_graceful_restartable is not None:
return True
if self.max_recovery is not None:
return True
if self.nbr_liveness is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.GracefulRestart']['meta_info']
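# Usage sketch (illustrative values): graceful-restart timers plus one
# helper filter entry for the default VRF. The helper filter name is
# hypothetical.
#
#   gr = MplsLdp.MplsLdpConfig.GracefulRestart()
#   gr.is_graceful_restartable = True
#   gr.forwarding_holding = 180   # seconds, within the documented 5..300
#   gr.max_recovery = 120
#   gr.nbr_liveness = 120
#   helper = MplsLdp.MplsLdpConfig.GracefulRestart.Helper()
#   helper.helper_vrf = 'default'
#   helper.helper_filter = 'GR-HELPER'  # hypothetical filter name
#   gr.helper.append(helper)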
class Logging(object):
"""
Enable LDP logging
.. attribute:: adjacency
Enable logging of adjacency messages
**type**\: bool
.. attribute:: graceful_restart
Enable logging of graceful\-restart messages
**type**\: bool
.. attribute:: neighbor
Enable logging of neighbor messages
**type**\: bool
.. attribute:: nsr
Enable logging of nsr messages
**type**\: bool
.. attribute:: password
Enable logging of password messages
**type**\: :py:class:`Password <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Logging.Password>`
.. attribute:: session_protection
Enable logging of session\-protection messages
**type**\: bool
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.adjacency = None
self.graceful_restart = None
self.neighbor = None
self.nsr = None
self.password = MplsLdp.MplsLdpConfig.Logging.Password()
self.password.parent = self
self.session_protection = None
class Password(object):
"""
Enable logging of password messages.
.. attribute:: config_msg
Log MPLS LDP password configuration changes
**type**\: :py:class:`ConfigMsg <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Logging.Password.ConfigMsg>`
.. attribute:: rollover_msg
Log MPLS LDP password rollover messages
**type**\: :py:class:`RolloverMsg <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Logging.Password.RolloverMsg>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.config_msg = MplsLdp.MplsLdpConfig.Logging.Password.ConfigMsg()
self.config_msg.parent = self
self.rollover_msg = MplsLdp.MplsLdpConfig.Logging.Password.RolloverMsg()
self.rollover_msg.parent = self
class ConfigMsg(object):
"""
Log MPLS LDP password configuration changes.
.. attribute:: enable
Log MPLS LDP password configuration changes
**type**\: bool
.. attribute:: rate_limit
This is the number of messages per minute to limit the logging. A value of 0 indicates no limits on the number of logged messages
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.enable = None
self.rate_limit = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:logging/Cisco-IOS-XE-mpls-ldp:password/Cisco-IOS-XE-mpls-ldp:config-msg'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.enable is not None:
return True
if self.rate_limit is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Logging.Password.ConfigMsg']['meta_info']
class RolloverMsg(object):
"""
Log MPLS LDP password rollover messages.
.. attribute:: enable
Log MPLS LDP password rollover messages
**type**\: bool
.. attribute:: rate_limit
This is the number of messages per minute to limit the logging. A value of 0 indicates no limits on the number of logged messages
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.enable = None
self.rate_limit = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:logging/Cisco-IOS-XE-mpls-ldp:password/Cisco-IOS-XE-mpls-ldp:rollover-msg'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.enable is not None:
return True
if self.rate_limit is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Logging.Password.RolloverMsg']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:logging/Cisco-IOS-XE-mpls-ldp:password'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.config_msg is not None and self.config_msg._has_data():
return True
if self.rollover_msg is not None and self.rollover_msg._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Logging.Password']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:logging'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.adjacency is not None:
return True
if self.graceful_restart is not None:
return True
if self.neighbor is not None:
return True
if self.nsr is not None:
return True
if self.password is not None and self.password._has_data():
return True
if self.session_protection is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Logging']['meta_info']
class Interfaces(object):
"""
MPLS LDP Interface configuration commands.
.. attribute:: interface
MPLS LDP Interface configuration commands. Where a corresponding global configuration command exists, the interface level command will take precedence when configured
**type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Interfaces.Interface>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.interface = YList()
self.interface.parent = self
self.interface.name = 'interface'
class Interface(object):
"""
MPLS LDP Interface configuration commands. Where a
corresponding global configuration command exists, the
interface level command will take precedence when
configured.
.. attribute:: vrf <key>
This contains the VRF Name, where 'default' is used for the default vrf
**type**\: str
.. attribute:: interface <key>
The Interface Name
**type**\: str
**refers to**\: :py:class:`name <ydk.models.ietf.ietf_interfaces.Interfaces.Interface>`
.. attribute:: afs
Address Family specific operational data
**type**\: :py:class:`Afs <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Interfaces.Interface.Afs>`
.. attribute:: disable_delay
This choice disables the delay, causing IGP sync to occur immediately when the session comes up
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: disable_quick_start_int
When set to true, disable LDP discovery's quick start mode for this interface
**type**\: bool
.. attribute:: link_hello_hold
LDP discovery link hello holdtime in seconds for this interface. This value overrides the global setting
**type**\: int
**range:** 0..4294967295
**units**\: second
**default value**\: 15
.. attribute:: link_hello_int
LDP discovery link hello interval in seconds for this interface. This value overrides the global setting
**type**\: int
**range:** 0..4294967295
**units**\: second
**default value**\: 5
.. attribute:: seconds
Time in seconds to delay IGP sync after session comes up
**type**\: int
**range:** 5..300
**units**\: second
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.vrf = None
self.interface = None
self.afs = MplsLdp.MplsLdpConfig.Interfaces.Interface.Afs()
self.afs.parent = self
self.disable_delay = None
self.disable_quick_start_int = None
self.link_hello_hold = None
self.link_hello_int = None
self.seconds = None
class Afs(object):
"""
Address Family specific operational data
.. attribute:: af
MPLS LDP Operational data for this Address Family
**type**\: list of :py:class:`Af <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Interfaces.Interface.Afs.Af>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.af = YList()
self.af.parent = self
self.af.name = 'af'
class Af(object):
"""
MPLS LDP Operational data for this Address Family.
.. attribute:: af_name <key>
Address Family name
**type**\: :py:class:`AfEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.AfEnum>`
.. attribute:: autoconfig_disable
True if LDP autoconfig is explicitly disabled on this interface
**type**\: bool
.. attribute:: bgp_redist
MPLS LDP configuration for protocol redistribution. By default, redistribution of BGP routes is disabled. It can be enabled for all BGP routes or for a specific AS. Also it can be redistributed to all LDP peers or to a filtered group of peers
**type**\: :py:class:`BgpRedist <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Interfaces.Interface.Afs.Af.BgpRedist>`
.. attribute:: enable
This is set true to enable LDP on this interface
**type**\: bool
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.af_name = None
self.autoconfig_disable = None
self.bgp_redist = MplsLdp.MplsLdpConfig.Interfaces.Interface.Afs.Af.BgpRedist()
self.bgp_redist.parent = self
self.enable = None
class BgpRedist(object):
"""
MPLS LDP configuration for protocol
redistribution. By default, redistribution of BGP
routes is disabled. It can be enabled for all
BGP routes or for a specific AS. Also it can be
redistributed to all LDP peers or to a filtered
group of peers.
.. attribute:: advertise_to
Filter of neighbors to receive BGP route redistributions from LDP. If the list is empty or unset, all LDP neighbors will receive redistributions
**type**\: str
.. attribute:: as_xx
First half of the BGP AS number in XX.YY format. Mandatory; must be a non\-zero value if the second half is zero
**type**\: int
**range:** 0..65535
.. attribute:: as_yy
Second half of the BGP AS number in XX.YY format. Mandatory; must be a non\-zero value if the first half is zero
**type**\: int
**range:** 0..4294967295
.. attribute:: enable
This is set true to allow LDP to redistribute BGP routes
**type**\: bool
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.advertise_to = None
self.as_xx = None
self.as_yy = None
self.enable = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:bgp-redist'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.advertise_to is not None:
return True
if self.as_xx is not None:
return True
if self.as_yy is not None:
return True
if self.enable is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Interfaces.Interface.Afs.Af.BgpRedist']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.af_name is None:
raise YPYModelError('Key property af_name is None')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:af[Cisco-IOS-XE-mpls-ldp:af-name = ' + str(self.af_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.af_name is not None:
return True
if self.autoconfig_disable is not None:
return True
if self.bgp_redist is not None and self.bgp_redist._has_data():
return True
if self.enable is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Interfaces.Interface.Afs.Af']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XE-mpls-ldp:afs'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.af is not None:
for child_ref in self.af:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Interfaces.Interface.Afs']['meta_info']
@property
def _common_path(self):
if self.vrf is None:
raise YPYModelError('Key property vrf is None')
if self.interface is None:
raise YPYModelError('Key property interface is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:interfaces/Cisco-IOS-XE-mpls-ldp:interface[Cisco-IOS-XE-mpls-ldp:vrf = ' + str(self.vrf) + '][Cisco-IOS-XE-mpls-ldp:interface = ' + str(self.interface) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.vrf is not None:
return True
if self.interface is not None:
return True
if self.afs is not None and self.afs._has_data():
return True
if self.disable_delay is not None:
return True
if self.disable_quick_start_int is not None:
return True
if self.link_hello_hold is not None:
return True
if self.link_hello_int is not None:
return True
if self.seconds is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Interfaces.Interface']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:interfaces'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.interface is not None:
for child_ref in self.interface:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Interfaces']['meta_info']
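# Usage sketch (illustrative values): enabling LDP on one interface for a
# single address family. The interface name is hypothetical and the AfEnum
# member is a placeholder.
#
#   intf = MplsLdp.MplsLdpConfig.Interfaces.Interface()
#   intf.vrf = 'default'
#   intf.interface = 'GigabitEthernet0/0/0'
#   af = MplsLdp.MplsLdpConfig.Interfaces.Interface.Afs.Af()
#   af.af_name = AfEnum.<ipv4 member>  # placeholder; see AfEnum
#   af.enable = True
#   intf.afs.af.append(af)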
class Routing(object):
"""
This container provides the MPLS LDP config for routing
protocols from which it can obtain addresses to
associate with labels.
.. attribute:: routing_inst
This entry provides the MPLS LDP config for this routing instance
**type**\: list of :py:class:`RoutingInst <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Routing.RoutingInst>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.routing_inst = YList()
self.routing_inst.parent = self
self.routing_inst.name = 'routing_inst'
class RoutingInst(object):
"""
This entry provides the MPLS LDP config for this
routing instance.
.. attribute:: routing_inst_name <key>
Name of the routing instance for which this MPLS LDP configuration applies
**type**\: str
.. attribute:: area_id
This leaf restricts the LDP Autoconfiguration feature to enable LDP on interfaces belonging to an OSPF process for a specific area. If no area is specified, then this applies to all interfaces associated with the OSPF process. If an area ID is specified, then only interfaces associated with that OSPF area are automatically enabled with LDP. Any interface\-specific LDP configuration will override this setting for that interface
**type**\: int
**range:** 0..4294967295
.. attribute:: autoconfig_enable
This leaf enables or disables LDP for all interfaces covered by this routing instance subject to the autoconfig\-scope
**type**\: bool
.. attribute:: level_id
This leaf restricts the LDP Autoconfiguration feature to enable LDP on interfaces belonging to an ISIS process for a specific level. If no level is specified, then this applies to all interfaces associated with the ISIS process. If a level is specified, then only interfaces associated with that ISIS level are automatically enabled with LDP. Any interface\-specific LDP configuration will override this setting for that interface
**type**\: :py:class:`LevelIdEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.MplsLdp.MplsLdpConfig.Routing.RoutingInst.LevelIdEnum>`
.. attribute:: sync
When set to true this enables LDP IGP synchronization. Without synchronization, packet loss can occur because the actions of the IGP and LDP are not synchronized
**type**\: bool
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.routing_inst_name = None
self.area_id = None
self.autoconfig_enable = None
self.level_id = None
self.sync = None
class LevelIdEnum(Enum):
"""
LevelIdEnum
This leaf restricts the LDP Autoconfiguration
feature to enable LDP on interfaces belonging to
an ISIS process for a specific level. If no level
is specified, then this applies to all interfaces
associated with the ISIS process. If a level is
specified, then only interfaces associated with
that ISIS level are automatically enabled with LDP.
Any interface\-specific LDP configuration will
override this setting for that interface.
.. data:: level_1 = 1
This leaf restricts the LDP Autoconfiguration
feature to enable LDP on interfaces belonging
to an IS-IS process level 1.
Any interface-specific LDP configuration will
override this setting for that interface.
.. data:: level_2 = 2
This leaf restricts the LDP Autoconfiguration
feature to enable LDP on interfaces belonging
to an IS-IS process level 2.
Any interface-specific LDP configuration will
override this setting for that interface.
.. data:: level_1_2 = 3
This leaf restricts the LDP Autoconfiguration
feature to enable LDP on interfaces belonging
to an IS-IS process levels 1 and 2.
Any interface-specific LDP configuration will
override this setting for that interface.
"""
level_1 = 1
level_2 = 2
level_1_2 = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Routing.RoutingInst.LevelIdEnum']
@property
def _common_path(self):
if self.routing_inst_name is None:
raise YPYModelError('Key property routing_inst_name is None')
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:routing/Cisco-IOS-XE-mpls-ldp:routing-inst[Cisco-IOS-XE-mpls-ldp:routing-inst-name = ' + str(self.routing_inst_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.routing_inst_name is not None:
return True
if self.area_id is not None:
return True
if self.autoconfig_enable is not None:
return True
if self.level_id is not None:
return True
if self.sync is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Routing.RoutingInst']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:routing'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.routing_inst is not None:
for child_ref in self.routing_inst:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.Routing']['meta_info']
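# Usage sketch (illustrative instance name): LDP autoconfiguration plus IGP
# sync for one routing instance, using the LevelIdEnum defined above.
#
#   inst = MplsLdp.MplsLdpConfig.Routing.RoutingInst()
#   inst.routing_inst_name = 'ISIS-CORE'  # hypothetical instance name
#   inst.autoconfig_enable = True
#   inst.sync = True
#   inst.level_id = MplsLdp.MplsLdpConfig.Routing.RoutingInst.LevelIdEnum.level_1_2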
class DualStack(object):
"""
This container holds the configuration of dual IPv4 and
IPv6 stack peers.
.. attribute:: max_wait
Wait time in seconds (0 indicates no preference)
**type**\: int
**range:** 0..60
.. attribute:: prefer_ipv4_peers
This contains the filter name for peers where IPv4 connections are preferred over IPv6 connections. The filter type is device\-specific and could be an ACL, a prefix list, or other mechanism
**type**\: str
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.max_wait = None
self.prefer_ipv4_peers = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config/Cisco-IOS-XE-mpls-ldp:dual-stack'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.max_wait is not None:
return True
if self.prefer_ipv4_peers is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig.DualStack']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp/Cisco-IOS-XE-mpls-ldp:mpls-ldp-config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.discovery is not None and self.discovery._has_data():
return True
if self.dual_stack is not None and self.dual_stack._has_data():
return True
if self.global_cfg is not None and self.global_cfg._has_data():
return True
if self.graceful_restart is not None and self.graceful_restart._has_data():
return True
if self.interfaces is not None and self.interfaces._has_data():
return True
if self.label_cfg is not None and self.label_cfg._has_data():
return True
if self.logging is not None and self.logging._has_data():
return True
if self.nbr_table is not None and self.nbr_table._has_data():
return True
if self.passwords is not None and self.passwords._has_data():
return True
if self.routing is not None and self.routing._has_data():
return True
if self.session is not None and self.session._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp.MplsLdpConfig']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:mpls-ldp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.mpls_ldp_config is not None and self.mpls_ldp_config._has_data():
return True
if self.mpls_ldp_state is not None and self.mpls_ldp_state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['MplsLdp']['meta_info']
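# --- Illustrative usage sketch (not part of the generated model) -------------
# A minimal, hedged example of reading this model from a device with YDK's
# CRUD service. The host, port, and credentials below are placeholders, and
# the provider signature can vary between ydk-py releases, so treat this as
# a sketch rather than a definitive recipe.
def _example_read_mpls_ldp():
    from ydk.providers import NetconfServiceProvider
    from ydk.services import CRUDService

    # Placeholder connection details; replace with a real device.
    provider = NetconfServiceProvider(address='10.0.0.1', port=830,
                                      username='admin', password='admin',
                                      protocol='ssh')
    crud = CRUDService()

    # Read the full mpls-ldp tree (both mpls-ldp-config and mpls-ldp-state).
    ldp = crud.read(provider, MplsLdp())
    return ldp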
class ClearMsgCountersRpc(object):
"""
This RPC clears the LDP message counters for either a single
neighbor or for all neighbors.
.. attribute:: input
**type**\: :py:class:`Input <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.ClearMsgCountersRpc.Input>`
.. attribute:: output
**type**\: :py:class:`Output <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.ClearMsgCountersRpc.Output>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.input = ClearMsgCountersRpc.Input()
self.input.parent = self
self.output = ClearMsgCountersRpc.Output()
self.output.parent = self
self.is_rpc = True
class Input(object):
"""
.. attribute:: all
Clear information for all neighbors
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: nbr_ip
LSR ID of the neighbor
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: vrf_name
This contains the VRF name, where 'default' is used for the default VRF
**type**\: str
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.all = None
self.nbr_ip = None
self.vrf_name = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:clear-msg-counters/Cisco-IOS-XE-mpls-ldp:input'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
if self.parent is None:
raise YPYError('Parent reference is needed to determine if entity has configuration data')
return self.parent.is_config()
def _has_data(self):
if self.all is not None:
return True
if self.nbr_ip is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['ClearMsgCountersRpc.Input']['meta_info']
class Output(object):
"""
.. attribute:: status
Return status will be 'OK' on success or an explanation string on failure
**type**\: str
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.status = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:clear-msg-counters/Cisco-IOS-XE-mpls-ldp:output'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
if self.parent is None:
raise YPYError('Parent reference is needed to determine if entity has configuration data')
return self.parent.is_config()
def _has_data(self):
if self.status is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['ClearMsgCountersRpc.Output']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:clear-msg-counters'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.input is not None and self.input._has_data():
return True
if self.output is not None and self.output._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['ClearMsgCountersRpc']['meta_info']
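# --- Illustrative RPC invocation sketch (not part of the generated model) ----
# RPC wrappers such as ClearMsgCountersRpc are typically dispatched through
# YDK's executor service. This hedged sketch clears the message counters for
# a single neighbor; the restart-neighbor and clear-forwarding RPCs below
# follow the same input/output pattern. The neighbor address is a placeholder.
def _example_clear_msg_counters(provider):
    from ydk.services import ExecutorService

    executor = ExecutorService()
    rpc = ClearMsgCountersRpc()
    rpc.input.nbr_ip = '192.0.2.1'     # LSR ID of the neighbor (placeholder)
    rpc.input.vrf_name = 'default'     # 'default' selects the default VRF

    # execute_rpc sends the RPC; per the model, output.status is expected to
    # be 'OK' on success or an explanation string on failure.
    output = executor.execute_rpc(provider, rpc)
    return output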
class RestartNeighborRpc(object):
"""
This RPC restarts a single LDP session or all LDP sessions
if the device supports that capability; it does not restart
the LDP process itself.
.. attribute:: input
**type**\: :py:class:`Input <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.RestartNeighborRpc.Input>`
.. attribute:: output
**type**\: :py:class:`Output <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.RestartNeighborRpc.Output>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.input = RestartNeighborRpc.Input()
self.input.parent = self
self.output = RestartNeighborRpc.Output()
self.output.parent = self
self.is_rpc = True
class Input(object):
"""
.. attribute:: all
Restart sessions for all neighbors
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: nbr_ip
LSR ID of the neighbor
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: vrf_name
This contains the VRF name, where 'default' is used for the default VRF
**type**\: str
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.all = None
self.nbr_ip = None
self.vrf_name = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:restart-neighbor/Cisco-IOS-XE-mpls-ldp:input'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
if self.parent is None:
raise YPYError('Parent reference is needed to determine if entity has configuration data')
return self.parent.is_config()
def _has_data(self):
if self.all is not None:
return True
if self.nbr_ip is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['RestartNeighborRpc.Input']['meta_info']
class Output(object):
"""
.. attribute:: status
Return status will be 'OK' on success or an explanation string on failure
**type**\: str
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.status = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:restart-neighbor/Cisco-IOS-XE-mpls-ldp:output'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
if self.parent is None:
raise YPYError('Parent reference is needed to determine if entity has configuration data')
return self.parent.is_config()
def _has_data(self):
if self.status is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['RestartNeighborRpc.Output']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:restart-neighbor'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.input is not None and self.input._has_data():
return True
if self.output is not None and self.output._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['RestartNeighborRpc']['meta_info']
class ClearForwardingRpc(object):
"""
This command resets LDP installed forwarding state for all
prefixes or a given prefix. It is useful when installed
LDP forwarding state needs to be reprogrammed in LSD and
MPLS forwarding.
.. attribute:: input
**type**\: :py:class:`Input <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.ClearForwardingRpc.Input>`
.. attribute:: output
**type**\: :py:class:`Output <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp.ClearForwardingRpc.Output>`
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.input = ClearForwardingRpc.Input()
self.input.parent = self
self.output = ClearForwardingRpc.Output()
self.output.parent = self
self.is_rpc = True
class Input(object):
"""
.. attribute:: all
This case is used to clear the forwarding entries for all prefixes
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: prefix_ip
This case provides the IP prefix for the forwarding entry whose data should be cleared
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: vrf_name
This contains the VRF name, where 'default' is used for the default VRF
**type**\: str
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.all = None
self.prefix_ip = None
self.vrf_name = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:clear-forwarding/Cisco-IOS-XE-mpls-ldp:input'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
if self.parent is None:
raise YPYError('Parent reference is needed to determine if entity has configuration data')
return self.parent.is_config()
def _has_data(self):
if self.all is not None:
return True
if self.prefix_ip is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['ClearForwardingRpc.Input']['meta_info']
class Output(object):
"""
.. attribute:: status
Return status will be 'OK' on success or an explanatory string on failure
**type**\: str
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
self.parent = None
self.status = None
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:clear-forwarding/Cisco-IOS-XE-mpls-ldp:output'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
if self.parent is None:
raise YPYError('Parent reference is needed to determine if entity has configuration data')
return self.parent.is_config()
def _has_data(self):
if self.status is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['ClearForwardingRpc.Output']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XE-mpls-ldp:clear-forwarding'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.input is not None and self.input._has_data():
return True
if self.output is not None and self.output._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['ClearForwardingRpc']['meta_info']
class NsrPeerSyncErrSyncPrepIdentity(NsrPeerSyncErrIdentity):
"""
LDP Peer Sync failed, sync prep.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrPeerSyncErrIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrPeerSyncErrSyncPrepIdentity']['meta_info']
class NsrSyncNackRsnPEndSockNotSyncedIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed because the P end sock was not synced.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnPEndSockNotSyncedIdentity']['meta_info']
class LabelTypeMplsIdentity(LabelTypeIdentity):
"""
This is an MPLS label.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
LabelTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['LabelTypeMplsIdentity']['meta_info']
class RoutePathLblOwnerLdpIdentity(RoutePathLblOwnerIdentity):
"""
Path outgoing label owned by LDP.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
RoutePathLblOwnerIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['RoutePathLblOwnerLdpIdentity']['meta_info']
class LabelTypeUnLabeledIdentity(LabelTypeIdentity):
"""
This is unlabeled.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
LabelTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['LabelTypeUnLabeledIdentity']['meta_info']
class NsrPeerSyncErrNoneIdentity(NsrPeerSyncErrIdentity):
"""
No error.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrPeerSyncErrIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrPeerSyncErrNoneIdentity']['meta_info']
class NsrSyncNackRsnErrRxUnexpOpenIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed due to an unexpected open.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnErrRxUnexpOpenIdentity']['meta_info']
class DownNbrReasonNaIdentity(DownNbrReasonIdentity):
"""
Not applicable; the neighbor is up.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
DownNbrReasonIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['DownNbrReasonNaIdentity']['meta_info']
class DownNbrReasonDiscHelloIdentity(DownNbrReasonIdentity):
"""
The local discovery hello timer expired.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
DownNbrReasonIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['DownNbrReasonDiscHelloIdentity']['meta_info']
class NsrPeerSyncErrLdpPeerIdentity(NsrPeerSyncErrIdentity):
"""
LDP Peer Sync failed, ldp peer
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrPeerSyncErrIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrPeerSyncErrLdpPeerIdentity']['meta_info']
class RoutePathIpNoFlagIdentity(RoutePathTypeIdentity):
"""
A primary path with no special flag/attribute
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
RoutePathTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['RoutePathIpNoFlagIdentity']['meta_info']
class NsrPeerSyncErrTcpPeerIdentity(NsrPeerSyncErrIdentity):
"""
LDP Peer Sync failed, tcp peer
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrPeerSyncErrIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrPeerSyncErrTcpPeerIdentity']['meta_info']
class LdpNsrPeerSyncStOperIdentity(NsrPeerSyncStateIdentity):
"""
LDP NSR peer synchronization is operational.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrPeerSyncStateIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['LdpNsrPeerSyncStOperIdentity']['meta_info']
class LdpNsrPeerSyncStPrepIdentity(NsrPeerSyncStateIdentity):
"""
LDP NSR peer synchronization is prep.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrPeerSyncStateIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['LdpNsrPeerSyncStPrepIdentity']['meta_info']
class NsrSyncNackRsnErrDhcAddIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed with an error creating the directed hello control
infrastructure.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnErrDhcAddIdentity']['meta_info']
class LabelTypeUnknownIdentity(LabelTypeIdentity):
"""
The label is unknown.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
LabelTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['LabelTypeUnknownIdentity']['meta_info']
class RoutePathIpBackupRemoteIdentity(RoutePathTypeIdentity):
"""
A non\-primary remote LFA FRR (pure) backup path
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
RoutePathTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['RoutePathIpBackupRemoteIdentity']['meta_info']
class IgpSyncDownReasonNoHelloAdjIdentity(IgpSyncDownReasonIdentity):
"""
No hello adjacency.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
IgpSyncDownReasonIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['IgpSyncDownReasonNoHelloAdjIdentity']['meta_info']
class NsrSyncNackRsnErrAdjAddIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed due to an error adding the adjacency.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnErrAdjAddIdentity']['meta_info']
class RoutePathLblOwnerNoneIdentity(RoutePathLblOwnerIdentity):
"""
No label and no owner.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
RoutePathLblOwnerIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['RoutePathLblOwnerNoneIdentity']['meta_info']
class IgpSyncDownReasonNaIdentity(IgpSyncDownReasonIdentity):
"""
Not Applicable.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
IgpSyncDownReasonIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['IgpSyncDownReasonNaIdentity']['meta_info']
class IgpSyncDownReasonNoPeerSessIdentity(IgpSyncDownReasonIdentity):
"""
No peer session.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
IgpSyncDownReasonIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['IgpSyncDownReasonNoPeerSessIdentity']['meta_info']
class RoutePathLblOwnerBgpIdentity(RoutePathLblOwnerIdentity):
"""
Path outgoing label owned by BGP.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
RoutePathLblOwnerIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['RoutePathLblOwnerBgpIdentity']['meta_info']
class IgpSyncDownReasonPeerUpdateNotReceivedIdentity(IgpSyncDownReasonIdentity):
"""
Initial update from peer not received yet.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
IgpSyncDownReasonIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['IgpSyncDownReasonPeerUpdateNotReceivedIdentity']['meta_info']
class NsrSyncNackRsnMissingElemIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed due to a Missing Element.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnMissingElemIdentity']['meta_info']
class NsrSyncNackRsnNoPEndSockIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed because there was no P end socket.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnNoPEndSockIdentity']['meta_info']
class NsrSyncNackRsnNoCtxIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed with a no context error.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnNoCtxIdentity']['meta_info']
class LdpNsrPeerSyncStReadyIdentity(NsrPeerSyncStateIdentity):
"""
LDP NSR peer synchronization is ready.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrPeerSyncStateIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['LdpNsrPeerSyncStReadyIdentity']['meta_info']
class RoutePathIpBackupIdentity(RoutePathTypeIdentity):
"""
A non\-primary local LFA FRR (pure) backup path
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
RoutePathTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['RoutePathIpBackupIdentity']['meta_info']
class NsrPeerSyncErrAppFailIdentity(NsrPeerSyncErrIdentity):
"""
LDP Peer Sync failed, app fail
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrPeerSyncErrIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrPeerSyncErrAppFailIdentity']['meta_info']
class NsrPeerSyncErrLdpSyncNackIdentity(NsrPeerSyncErrIdentity):
"""
LDP Peer Sync failed, received sync nack.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrPeerSyncErrIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrPeerSyncErrLdpSyncNackIdentity']['meta_info']
class NsrSyncNackRsnErrAddrBindIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed to bind address.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnErrAddrBindIdentity']['meta_info']
class LdpNsrPeerSyncStAppWaitIdentity(NsrPeerSyncStateIdentity):
"""
LDP NSR peer synchronization is app wait.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrPeerSyncStateIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['LdpNsrPeerSyncStAppWaitIdentity']['meta_info']
class LdpNsrPeerSyncStWaitIdentity(NsrPeerSyncStateIdentity):
"""
LDP NSR peer synchronization is wait.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrPeerSyncStateIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['LdpNsrPeerSyncStWaitIdentity']['meta_info']
class NsrSyncNackRsnErrAppNotFoundIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed due to app not found.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnErrAppNotFoundIdentity']['meta_info']
class NsrSyncNackRsnErrRxNotifIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed with a received notification error.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnErrRxNotifIdentity']['meta_info']
class DownNbrReasonNbrHoldIdentity(DownNbrReasonIdentity):
"""
The neighbor sent an error: hold time expired.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
DownNbrReasonIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['DownNbrReasonNbrHoldIdentity']['meta_info']
class NsrSyncNackRsnErrRxBadPieIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed, received a bad PIE.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnErrRxBadPieIdentity']['meta_info']
class NsrSyncNackRsnNoneIdentity(NsrSyncNackRsnIdentity):
"""
None
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnNoneIdentity']['meta_info']
class NsrPeerSyncErrTcpGblIdentity(NsrPeerSyncErrIdentity):
"""
LDP Peer Sync failed, tcp gbl
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrPeerSyncErrIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrPeerSyncErrTcpGblIdentity']['meta_info']
class RoutePathLblOwnerStaticIdentity(RoutePathLblOwnerIdentity):
"""
Path outgoing label statically configured.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
RoutePathLblOwnerIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['RoutePathLblOwnerStaticIdentity']['meta_info']
class NsrSyncNackRsnTblIdMismatchIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed with a table ID mismatch.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnTblIdMismatchIdentity']['meta_info']
class NsrSyncNackRsnPpExistsIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed because the pp already exists.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnPpExistsIdentity']['meta_info']
class IgpSyncDownReasonInternalIdentity(IgpSyncDownReasonIdentity):
"""
Internal reason.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
IgpSyncDownReasonIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['IgpSyncDownReasonInternalIdentity']['meta_info']
class IccpTypeMlacpIdentity(IccpTypeIdentity):
"""
MLACP Multi\-chassis Link Aggregation Control Protocol.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
IccpTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['IccpTypeMlacpIdentity']['meta_info']
class NsrSyncNackRsnEnomemIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed due to an out of memory error.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnEnomemIdentity']['meta_info']
class IgpSyncDownReasonPeerUpdateNotDoneIdentity(IgpSyncDownReasonIdentity):
"""
Initial update to peer not done yet.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
IgpSyncDownReasonIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['IgpSyncDownReasonPeerUpdateNotDoneIdentity']['meta_info']
class RoutePathIpBgpBackupIdentity(RoutePathTypeIdentity):
"""
A non\-primary BGP backup path
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
RoutePathTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['RoutePathIpBgpBackupIdentity']['meta_info']
class NsrPeerSyncErrLdpGblIdentity(NsrPeerSyncErrIdentity):
"""
LDP Peer Sync failed, ldp gbl
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrPeerSyncErrIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrPeerSyncErrLdpGblIdentity']['meta_info']
class LdpNsrPeerSyncStNoneIdentity(NsrPeerSyncStateIdentity):
"""
LDP NSR peer synchronization none.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrPeerSyncStateIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['LdpNsrPeerSyncStNoneIdentity']['meta_info']
class IcpmTypeIccpIdentity(IcpmTypeIdentity):
"""
ICCP Interchassis Communication Protocol.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
IcpmTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['IcpmTypeIccpIdentity']['meta_info']
class NsrStatusDisabledIdentity(NsrStatusIdentity):
"""
NSR is not enabled.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrStatusIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrStatusDisabledIdentity']['meta_info']
class NsrStatusReadyIdentity(NsrStatusIdentity):
"""
Device is NSR Ready.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrStatusIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrStatusReadyIdentity']['meta_info']
class NsrSyncNackRsnErrPpCreateIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed creating the pp.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnErrPpCreateIdentity']['meta_info']
class NsrSyncNackRsnErrTpCreateIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed creating the tp.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnErrTpCreateIdentity']['meta_info']
class NsrSyncNackRsnErrUnexpPeerDownIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed due to unexpected peer down.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnErrUnexpPeerDownIdentity']['meta_info']
class RoutePathIpProtectedIdentity(RoutePathTypeIdentity):
"""
A primary path with LFA FRR protection
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
RoutePathTypeIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['RoutePathIpProtectedIdentity']['meta_info']
class NsrSyncNackRsnErrAppInvalidIdentity(NsrSyncNackRsnIdentity):
"""
NSR failed due to an app invalid error.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrSyncNackRsnIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrSyncNackRsnErrAppInvalidIdentity']['meta_info']
class NsrStatusNotReadyIdentity(NsrStatusIdentity):
"""
Device is not NSR Ready.
"""
_prefix = 'mpls-ldp-ios-xe-oper'
_revision = '2017-02-07'
def __init__(self):
NsrStatusIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_ldp as meta
return meta._meta_table['NsrStatusNotReadyIdentity']['meta_info']
|
{
"content_hash": "a40317699f64b4495ef4653ed9324c18",
"timestamp": "",
"source": "github",
"line_count": 13983,
"max_line_length": 882,
"avg_line_length": 38.275191303725954,
"alnum_prop": 0.4504112465947437,
"repo_name": "111pontes/ydk-py",
"id": "8fa0715d5888f6d0dd1ac74077a15ed27406aaa5",
"size": "535202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco-ios-xe/ydk/models/cisco_ios_xe/Cisco_IOS_XE_mpls_ldp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117948"
}
],
"symlink_target": ""
}
|
from django import forms
from django.template.loader import render_to_string
from uni_form.helpers import FormHelper, Submit, Reset
from uni_form.helpers import Layout, Fieldset, Column, Row, HTML
class TestForm(forms.Form):
character_field = forms.CharField(label="Character Field", help_text="I am help text", max_length=30, required=True, widget=forms.TextInput())
url_field = forms.URLField(label='URL field', verify_exists=False, max_length=100, required=True, widget=forms.TextInput())
textarea_field = forms.CharField(label='Textareafield', required=True, widget=forms.Textarea())
hidden_field = forms.CharField(label='textarea_field', required=True, widget=forms.HiddenInput())
class HelperTestForm(TestForm):
# Attach a formHelper to your forms class.
helper = FormHelper()
# Add in a class and id
helper.form_id = 'this-form-rocks'
helper.form_class = 'search'
# add in a submit and reset button
submit = Submit('enter','enter some data')
helper.add_input(submit)
reset = Reset('reset','reset button')
helper.add_input(reset)
class LayoutTestForm(forms.Form):
is_company = forms.CharField(label="company", required=False, widget=forms.CheckboxInput())
email = forms.CharField(label="email", max_length=30, required=True, widget=forms.TextInput())
password1 = forms.CharField(label="password", max_length=30, required=True, widget=forms.PasswordInput())
password2 = forms.CharField(label="re-enter password", max_length=30, required=True, widget=forms.PasswordInput())
first_name = forms.CharField(label="first name", max_length=30, required=True, widget=forms.TextInput())
last_name = forms.CharField(label="last name", max_length=30, required=True, widget=forms.TextInput())
# Attach a formHelper to your forms class.
helper = FormHelper()
# create some HTML that you want in the page
style = """
<style>
.formRow {
color: red;
}
</style>
"""
# create the layout object
layout = Layout(
# first fieldset shows the company
Fieldset('', 'is_company'),
# second fieldset shows the contact info
Fieldset('Contact details',
HTML(style),
'email',
Row('password1','password2'),
'first_name',
'last_name',
)
)
helper.add_layout(layout)
submit = Submit('add','Add this contact')
helper.add_input(submit)
class ComplexLayoutTest(forms.Form):
"""
TODO: get digi604 to make this work
help_text = render_to_string("example/help_text.html")
layout = Layout(Fieldset(_('Basic Settings'),
'title',
'type',
'available_date',
),
Fieldset(_('Overview'),
Column(Fieldset(_('Object address'),
Row('address', 'street_number'),
Row('zip', 'city'),
'area',
),
Fieldset(_("Next public transport"),
'train_station',
Row('tram_station','tram_number'),
Row('bus_station','bus_number'),
),
),
Column("is_for_rent",
Fieldset(_("Rent"),
'rent-price',
),
Fieldset(_("Sell"),
'buy_price',
),
Fieldset(_("Measurements"),
'floor_space',
'room_height',
'construction_year',
),
),
Fieldset(_('Additional Function'),
HTML('<p class="tip">%s</p>' % unicode(help_text)),
'features',
),
Fieldset(_("Description"),
"description")
)
helper.add_layout(layout)
"""
|
{
"content_hash": "b8595aed04c1f0b24d73ccb891cd2d2f",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 146,
"avg_line_length": 41.083333333333336,
"alnum_prop": 0.44949290060851926,
"repo_name": "codepython/CollectorCity-Market-Place",
"id": "8daa73a00cc31896831c83a91ff2cc8dc2716790",
"size": "4930",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "stores/apps/uni_form/tests/test_project/test_app/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "863646"
},
{
"name": "HTML",
"bytes": "475154"
},
{
"name": "JavaScript",
"bytes": "693720"
},
{
"name": "Python",
"bytes": "1860719"
},
{
"name": "Shell",
"bytes": "1174"
}
],
"symlink_target": ""
}
|
import mock
from openstackclient.common import exceptions
from openstackclient.network import common
from openstackclient.tests import utils
RESOURCE = 'resource'
RESOURCES = 'resources'
NAME = 'matrix'
ID = 'Fishburne'
class TestFind(utils.TestCase):
def setUp(self):
super(TestFind, self).setUp()
self.mock_client = mock.Mock()
self.list_resources = mock.Mock()
self.mock_client.list_resources = self.list_resources
self.matrix = {'id': ID}
def test_name(self):
self.list_resources.return_value = {RESOURCES: [self.matrix]}
result = common.find(self.mock_client, RESOURCE, RESOURCES, NAME)
self.assertEqual(ID, result)
self.list_resources.assert_called_with(fields='id', name=NAME)
def test_id(self):
self.list_resources.side_effect = [{RESOURCES: []},
{RESOURCES: [self.matrix]}]
result = common.find(self.mock_client, RESOURCE, RESOURCES, NAME)
self.assertEqual(ID, result)
self.list_resources.assert_called_with(fields='id', id=NAME)
def test_nameo(self):
self.list_resources.return_value = {RESOURCES: [self.matrix]}
result = common.find(self.mock_client, RESOURCE, RESOURCES, NAME,
name_attr='nameo')
self.assertEqual(ID, result)
self.list_resources.assert_called_with(fields='id', nameo=NAME)
def test_dups(self):
dup = {'id': 'Larry'}
self.list_resources.return_value = {RESOURCES: [self.matrix, dup]}
self.assertRaises(exceptions.CommandError, common.find,
self.mock_client, RESOURCE, RESOURCES, NAME)
def test_nada(self):
self.list_resources.side_effect = [{RESOURCES: []},
{RESOURCES: []}]
self.assertRaises(exceptions.CommandError, common.find,
self.mock_client, RESOURCE, RESOURCES, NAME)
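# --- Reference sketch of the behaviour under test (not the real code) --------
# The tests above pin down common.find's contract: look the resource up by
# name (or a custom name_attr) first, fall back to an id lookup, return the
# single match's 'id', and raise CommandError on zero or multiple matches.
# This is a hedged re-derivation from the assertions, not the actual
# openstackclient implementation.
def find_sketch(client, resource, resources, name_or_id, name_attr='name'):
    from openstackclient.common import exceptions

    # e.g. resources='resources' -> client.list_resources(...)
    list_method = getattr(client, 'list_%s' % resources)
    for attr in (name_attr, 'id'):
        data = list_method(**{'fields': 'id', attr: name_or_id})
        matches = data[resources]
        if len(matches) == 1:
            return matches[0]['id']
        if len(matches) > 1:
            raise exceptions.CommandError(
                'Multiple %s matches for "%s"' % (resource, name_or_id))
    raise exceptions.CommandError(
        'No %s found for "%s"' % (resource, name_or_id))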
|
{
"content_hash": "e6da7443278131227233d3c02ee6fa06",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 74,
"avg_line_length": 33.67796610169491,
"alnum_prop": 0.6134876698540513,
"repo_name": "JioCloud/python-openstackclient",
"id": "b30fdfcb35951aa5435bc0c70808f00362e6d626",
"size": "2553",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "openstackclient/tests/network/test_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
39. Testing using the Test Client
The test client is a class that can act like a simple
browser for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
``Client`` objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the ``Client`` instance.
This is not intended as a replacement for Twill, Selenium, or
other browser automation frameworks - it is here to allow
testing against the contexts and templates produced by a view,
rather than the HTML rendered to the end-user.
"""
from __future__ import unicode_literals
from django.core import mail
from django.test import Client, TestCase, RequestFactory
from django.test import override_settings
from .views import get_view
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class ClientTest(TestCase):
fixtures = ['testdata.json']
urls = 'test_client.urls'
def test_get_view(self):
"GET a view"
# The data is ignored, but let's check it doesn't crash the system
# anyway.
data = {'var': '\xf2'}
response = self.client.get('/get_view/', data)
# Check some response details
self.assertContains(response, 'This is a test')
self.assertEqual(response.context['var'], '\xf2')
self.assertEqual(response.templates[0].name, 'GET Template')
def test_get_post_view(self):
"GET a view that normally expects POSTs"
response = self.client.get('/post_view/', {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'Empty GET Template')
self.assertTemplateUsed(response, 'Empty GET Template')
self.assertTemplateNotUsed(response, 'Empty POST Template')
def test_empty_post(self):
"POST an empty dictionary to a view"
response = self.client.post('/post_view/', {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'Empty POST Template')
self.assertTemplateNotUsed(response, 'Empty GET Template')
self.assertTemplateUsed(response, 'Empty POST Template')
def test_post(self):
"POST some data to a view"
post_data = {
'value': 37
}
response = self.client.post('/post_view/', post_data)
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['data'], '37')
self.assertEqual(response.templates[0].name, 'POST Template')
self.assertContains(response, 'Data received')
def test_response_headers(self):
"Check the value of HTTP headers returned in a response"
response = self.client.get("/header_view/")
self.assertEqual(response['X-DJANGO-TEST'], 'Slartibartfast')
def test_response_attached_request(self):
"""
Check that the returned response has a ``request`` attribute with the
originating environ dict and a ``wsgi_request`` with the originating
``WSGIRequest`` instance.
"""
response = self.client.get("/header_view/")
self.assertTrue(hasattr(response, 'request'))
self.assertTrue(hasattr(response, 'wsgi_request'))
for key, value in response.request.items():
self.assertIn(key, response.wsgi_request.environ)
self.assertEqual(response.wsgi_request.environ[key], value)
def test_raw_post(self):
"POST raw data (with a content type) to a view"
test_doc = """<?xml version="1.0" encoding="utf-8"?><library><book><title>Blink</title><author>Malcolm Gladwell</author></book></library>"""
response = self.client.post("/raw_post_view/", test_doc,
content_type="text/xml")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, "Book template")
self.assertEqual(response.content, b"Blink - Malcolm Gladwell")
def test_insecure(self):
"GET a URL through http"
response = self.client.get('/secure_view/', secure=False)
self.assertFalse(response.test_was_secure_request)
self.assertEqual(response.test_server_port, '80')
def test_secure(self):
"GET a URL through https"
response = self.client.get('/secure_view/', secure=True)
self.assertTrue(response.test_was_secure_request)
self.assertEqual(response.test_server_port, '443')
def test_redirect(self):
"GET a URL that redirects elsewhere"
response = self.client.get('/redirect_view/')
# Check that the response was a 302 (redirect) and that
# assertRedirect() understands to put an implicit http://testserver/ in
# front of non-absolute URLs.
self.assertRedirects(response, '/get_view/')
host = 'django.testserver'
client_providing_host = Client(HTTP_HOST=host)
response = client_providing_host.get('/redirect_view/')
# Check that the response was a 302 (redirect) with absolute URI
self.assertRedirects(response, '/get_view/', host=host)
def test_redirect_with_query(self):
"GET a URL that redirects with given GET parameters"
response = self.client.get('/redirect_view/', {'var': 'value'})
# Check if parameters are intact
self.assertRedirects(response, 'http://testserver/get_view/?var=value')
def test_permanent_redirect(self):
"GET a URL that redirects permanently elsewhere"
response = self.client.get('/permanent_redirect_view/')
# Check that the response was a 301 (permanent redirect)
self.assertRedirects(response, 'http://testserver/get_view/', status_code=301)
client_providing_host = Client(HTTP_HOST='django.testserver')
response = client_providing_host.get('/permanent_redirect_view/')
# Check that the response was a 301 (permanent redirect) with absolute URI
self.assertRedirects(response, 'http://django.testserver/get_view/', status_code=301)
def test_temporary_redirect(self):
"GET a URL that does a non-permanent redirect"
response = self.client.get('/temporary_redirect_view/')
# Check that the response was a 302 (non-permanent redirect)
self.assertRedirects(response, 'http://testserver/get_view/', status_code=302)
def test_redirect_to_strange_location(self):
"GET a URL that redirects to a non-200 page"
response = self.client.get('/double_redirect_view/')
# Check that the response was a 302, and that
# the attempt to get the redirection location returned 301 when retrieved
self.assertRedirects(response, 'http://testserver/permanent_redirect_view/', target_status_code=301)
def test_follow_redirect(self):
"A URL that redirects can be followed to termination."
response = self.client.get('/double_redirect_view/', follow=True)
self.assertRedirects(response, 'http://testserver/get_view/', status_code=302, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 2)
def test_redirect_http(self):
"GET a URL that redirects to an http URI"
response = self.client.get('/http_redirect_view/', follow=True)
self.assertFalse(response.test_was_secure_request)
def test_redirect_https(self):
"GET a URL that redirects to an https URI"
response = self.client.get('/https_redirect_view/', follow=True)
self.assertTrue(response.test_was_secure_request)
def test_notfound_response(self):
"GET a URL that responds as '404:Not Found'"
response = self.client.get('/bad_view/')
# Check that the response was a 404, and that the content contains MAGIC
self.assertContains(response, 'MAGIC', status_code=404)
def test_valid_form(self):
"POST valid data to a form"
post_data = {
'text': 'Hello World',
'email': 'foo@example.com',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Valid POST Template")
def test_valid_form_with_hints(self):
"GET a form, providing hints in the GET data"
hints = {
'text': 'Hello World',
'multi': ('b', 'c', 'e')
}
response = self.client.get('/form_view/', data=hints)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Form GET Template")
# Check that the multi-value data has been rolled out ok
self.assertContains(response, 'Select a valid choice.', 0)
def test_incomplete_data_form(self):
"POST incomplete data to a form"
post_data = {
'text': 'Hello World',
'value': 37
}
response = self.client.post('/form_view/', post_data)
self.assertContains(response, 'This field is required.', 3)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'This field is required.')
self.assertFormError(response, 'form', 'single', 'This field is required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error(self):
"POST erroneous data to a form"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
def test_valid_form_with_template(self):
"POST valid data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'email': 'foo@example.com',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Valid POST Template")
def test_incomplete_data_form_with_template(self):
"POST incomplete data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'value': 37
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, 'form_view.html')
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'This field is required.')
self.assertFormError(response, 'form', 'single', 'This field is required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error_with_template(self):
"POST erroneous data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b', 'c', 'e')
}
response = self.client.post('/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
def test_unknown_page(self):
"GET an invalid URL"
response = self.client.get('/unknown_view/')
# Check that the response was a 404
self.assertEqual(response.status_code, 404)
def test_url_parameters(self):
"Make sure that URL ;-parameters are not stripped."
response = self.client.get('/unknown_view/;some-parameter')
# Check that the path in the response includes it (ignore that it's a 404)
self.assertEqual(response.request['PATH_INFO'], '/unknown_view/;some-parameter')
def test_view_with_login(self):
"Request a page that is protected with @login_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/login_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_method_login(self):
"Request a page that is protected with a @login_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_method_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/login_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_method_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_login_and_custom_redirect(self):
"Request a page that is protected with @login_required(redirect_field_name='redirect_to')"
# Get the page without logging in. Should result in 302.
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertRedirects(response, 'http://testserver/accounts/login/?redirect_to=/login_protected_view_custom_redirect/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/login_protected_view_custom_redirect/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_bad_login(self):
"Request a page that is protected with @login, but use bad credentials"
login = self.client.login(username='otheruser', password='nopassword')
self.assertFalse(login)
def test_view_with_inactive_login(self):
"Request a page that is protected with @login, but use an inactive login"
login = self.client.login(username='inactive', password='password')
self.assertFalse(login)
def test_logout(self):
"Request a logout after logging in"
# Log in
self.client.login(username='testclient', password='password')
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get('/login_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/login_protected_view/')
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.signed_cookies")
def test_logout_cookie_sessions(self):
self.test_logout()
def test_view_with_permissions(self):
"Request a page that is protected with @permission_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/permission_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/permission_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/permission_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/permission_protected_view/')
# TODO: Log in with right permissions and request the page again
def test_view_with_permissions_exception(self):
"Request a page that is protected with @permission_required but raises an exception"
# Get the page without logging in. Should result in 403.
response = self.client.get('/permission_protected_view_exception/')
self.assertEqual(response.status_code, 403)
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 403.
response = self.client.get('/permission_protected_view_exception/')
self.assertEqual(response.status_code, 403)
def test_view_with_method_permissions(self):
"Request a page that is protected with a @permission_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/permission_protected_method_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/permission_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/permission_protected_method_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/permission_protected_method_view/')
# TODO: Log in with right permissions and request the page again
def test_external_redirect(self):
response = self.client.get('/django_project_redirect/')
self.assertRedirects(response, 'https://www.djangoproject.com/', fetch_redirect_response=False)
def test_session_modifying_view(self):
"Request a page that modifies the session"
# Session value isn't set initially
try:
self.client.session['tobacconist']
self.fail("Shouldn't have a session value")
except KeyError:
pass
self.client.post('/session_view/')
# Check that the session was modified
self.assertEqual(self.client.session['tobacconist'], 'hovercraft')
def test_view_with_exception(self):
"Request a page that is known to throw an error"
self.assertRaises(KeyError, self.client.get, "/broken_view/")
# Try the same assertion, a different way
try:
self.client.get('/broken_view/')
self.fail('Should raise an error')
except KeyError:
pass
def test_mail_sending(self):
"Test that mail is redirected to a dummy outbox during test setup"
response = self.client.get('/mail_sending_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Test message')
self.assertEqual(mail.outbox[0].body, 'This is a test email')
self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
def test_mass_mail_sending(self):
"Test that mass mail is redirected to a dummy outbox during test setup"
response = self.client.get('/mass_mail_sending_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, 'First Test message')
self.assertEqual(mail.outbox[0].body, 'This is the first test email')
self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
self.assertEqual(mail.outbox[1].subject, 'Second Test message')
self.assertEqual(mail.outbox[1].body, 'This is the second test email')
self.assertEqual(mail.outbox[1].from_email, 'from@example.com')
self.assertEqual(mail.outbox[1].to[0], 'second@example.com')
self.assertEqual(mail.outbox[1].to[1], 'third@example.com')
@override_settings(
MIDDLEWARE_CLASSES=('django.middleware.csrf.CsrfViewMiddleware',)
)
class CSRFEnabledClientTests(TestCase):
urls = 'test_client.urls'
def test_csrf_enabled_client(self):
"A client can be instantiated with CSRF checks enabled"
csrf_client = Client(enforce_csrf_checks=True)
# The normal client allows the post
response = self.client.post('/post_view/', {})
self.assertEqual(response.status_code, 200)
# The CSRF-enabled client rejects it
response = csrf_client.post('/post_view/', {})
self.assertEqual(response.status_code, 403)
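# Added illustrative note (not part of the original suite): an enforcing
# client can pass the check by echoing the token Django sets as a cookie,
# assuming the GET view renders the CSRF token; the path is hypothetical.
#
#     csrf_client = Client(enforce_csrf_checks=True)
#     csrf_client.get('/form_view/')                 # sets the csrftoken cookie
#     token = csrf_client.cookies['csrftoken'].value
#     csrf_client.post('/form_view/', {'csrfmiddlewaretoken': token})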
class CustomTestClient(Client):
i_am_customized = "Yes"
class CustomTestClientTest(TestCase):
client_class = CustomTestClient
def test_custom_test_client(self):
"""A test case can specify a custom class for self.client."""
self.assertEqual(hasattr(self.client, "i_am_customized"), True)
class RequestFactoryTest(TestCase):
urls = 'test_client.urls'
def test_request_factory(self):
factory = RequestFactory()
request = factory.get('/somewhere/')
response = get_view(request)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'This is a test')
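    def test_request_factory_post(self):
        # Added illustrative sketch: the factory can also build POST
        # requests for calling views directly (the path and payload here
        # are hypothetical).
        factory = RequestFactory()
        request = factory.post('/somewhere/', {'text': 'hello'})
        self.assertEqual(request.method, 'POST')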
|
{
"content_hash": "1e247e365626d4ed1c58ee6e00fa24e2",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 148,
"avg_line_length": 42.33888888888889,
"alnum_prop": 0.6542448497572497,
"repo_name": "oscaro/django",
"id": "df8515bcff3bdd65d144550be50a2ac9e0a158b8",
"size": "22887",
"binary": false,
"copies": "5",
"ref": "refs/heads/oscaro-backports-1.7.10",
"path": "tests/test_client/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42829"
},
{
"name": "HTML",
"bytes": "174148"
},
{
"name": "JavaScript",
"bytes": "102935"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "10109639"
},
{
"name": "Shell",
"bytes": "3056"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Tanh(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Tanh',
inputs=['x'],
outputs=['y'],
)
x = np.array([-1, 0, 1]).astype(np.float32)
        y = np.tanh(x)  # expected output [-0.76159416, 0., 0.76159416]
expect(node, inputs=[x], outputs=[y],
name='test_tanh_example')
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.tanh(x)
expect(node, inputs=[x], outputs=[y],
name='test_tanh')
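# Standalone check of the expected values above, independent of the test
# harness (a sketch; outputs shown to approximate float32 precision):
#
#     >>> np.tanh(np.array([-1, 0, 1], dtype=np.float32))
#     array([-0.7615942,  0.       ,  0.7615942], dtype=float32)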
|
{
"content_hash": "dbc09634a67b1acff8bde64856adce92",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 71,
"avg_line_length": 26.193548387096776,
"alnum_prop": 0.5591133004926109,
"repo_name": "mlperf/training_results_v0.6",
"id": "c01e76b88a358bcde4ccb440f23cd728a04125cb",
"size": "812",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/onnx-tensorrt/third_party/onnx/onnx/backend/test/case/node/tanh.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
}
|
from django.db import models
from . import Geoposition
from .forms import GeopositionField as GeopositionFormField
from django.utils.encoding import smart_unicode
class GeopositionField(models.Field):
description = "A geoposition (latitude and longitude)"
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 42
super(GeopositionField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return 'CharField'
def to_python(self, value):
if not value:
            value = [0, 0]
if isinstance(value, Geoposition):
return value
if isinstance(value, list):
return Geoposition(value[0], value[1])
value_parts = value.rsplit(',')
try:
latitude = value_parts[0]
except IndexError:
latitude = '0.0'
try:
longitude = value_parts[1]
except IndexError:
longitude = '0.0'
return Geoposition(latitude, longitude)
def get_prep_value(self, value):
return unicode(value)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return smart_unicode(value)
def formfield(self, **kwargs):
defaults = {
'form_class': GeopositionFormField
}
defaults.update(kwargs)
return super(GeopositionField, self).formfield(**defaults)
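# A minimal sketch of how to_python handles each supported input (the
# coordinates are hypothetical):
#
#     field = GeopositionField()
#     field.to_python('52.520008,13.404954')   # Geoposition('52.520008', '13.404954')
#     field.to_python([52.520008, 13.404954])  # Geoposition(52.520008, 13.404954)
#     field.to_python('')                      # Geoposition(0, 0)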
|
{
"content_hash": "31a37ad8a6bc8422ff033c5038a5102d",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 66,
"avg_line_length": 28.313725490196077,
"alnum_prop": 0.6052631578947368,
"repo_name": "coxmediagroup/django-geoposition",
"id": "dc841f4b5d3934e5390d69f87f8ea1d83f86702f",
"size": "1444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geoposition/fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "812"
},
{
"name": "HTML",
"bytes": "151"
},
{
"name": "JavaScript",
"bytes": "4696"
},
{
"name": "Python",
"bytes": "5165"
}
],
"symlink_target": ""
}
|
from pymongo import MongoClient
from flask import session
client = MongoClient()
db = client.pcparts
def register(username, password, chkpw, email):
    # NOTE: passwords are stored in plaintext here; a real application
    # should hash them before insertion.
    if db.users.find({"username": username}).count() != 0:
        return "There is an account with that username"
    elif chkpw != password:
        return "Passwords aren't the same"
    elif db.users.find({"email": email}).count() != 0:
        return "There is an account with that email"
    else:
        db.users.insert({'username': username, 'password': password, 'email': email})
        return 'True'
def login(user, password):
    check = db.users.find_one({'username': user, 'password': password}, fields={'_id': False})
    if db.users.find({"username": user}).count() == 0:
        return "No account with that username"
    elif check is None:
        return "Username or password is invalid"
    else:
        return 'True'
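# Hypothetical usage, assuming a MongoDB server is running locally:
#
#     >>> register('alice', 's3cret', 's3cret', 'alice@example.com')
#     'True'
#     >>> login('alice', 'wrong')
#     'Username or password is invalid'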
|
{
"content_hash": "57d8c293410b7cc7be593a9d793c9052",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 88,
"avg_line_length": 34.07692307692308,
"alnum_prop": 0.6455981941309256,
"repo_name": "stuycs-softdev-fall-2013/proj3-6-JJTS",
"id": "2525397bd73bbaba2e67b295ad64e094dcdd8c33",
"size": "886",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "125810"
},
{
"name": "Python",
"bytes": "26941"
}
],
"symlink_target": ""
}
|
import os
import json
from flask import Flask, request, Response
from flask import render_template, send_from_directory, url_for
app = Flask(__name__)
app.config.from_object('url_short.config')
app.url_map.strict_slashes = False
import url_short.views
|
{
"content_hash": "407c0e147a149d8d78b534439bb68b6c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 63,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.765625,
"repo_name": "neerajrao/redis-url-shorten",
"id": "eeaf94bc1cc992a6b54b6587ecaaef2a86dfb5cd",
"size": "256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "url_short/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3205"
},
{
"name": "JavaScript",
"bytes": "3407"
},
{
"name": "Python",
"bytes": "7893"
}
],
"symlink_target": ""
}
|
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class ExpressRouteCircuitAuthorizationsOperations(object):
"""ExpressRouteCircuitAuthorizationsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2016-09-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-09-01"
self.config = config
def delete(
self, resource_group_name, circuit_name, authorization_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified authorization from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202, 200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, circuit_name, authorization_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified authorization from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ExpressRouteCircuitAuthorization
<azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitAuthorization>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, circuit_name, authorization_name, authorization_parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates an authorization in the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:param authorization_parameters: Parameters supplied to the create or
update express route circuit authorization operation.
:type authorization_parameters:
:class:`ExpressRouteCircuitAuthorization
<azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitAuthorization>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`ExpressRouteCircuitAuthorization
<azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitAuthorization>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(authorization_parameters, 'ExpressRouteCircuitAuthorization')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [201, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', response)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list(
self, resource_group_name, circuit_name, custom_headers=None, raw=False, **operation_config):
"""Gets all authorizations in an express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ExpressRouteCircuitAuthorizationPaged
<azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitAuthorizationPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ExpressRouteCircuitAuthorizationPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ExpressRouteCircuitAuthorizationPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
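# Hedged usage sketch: this operations class is normally reached through
# the generated management client rather than constructed directly; the
# client name follows azure-mgmt-network conventions of this era, and the
# credentials and resource names are placeholders.
#
#     from azure.mgmt.network import NetworkManagementClient
#     client = NetworkManagementClient(credentials, subscription_id)
#     for auth in client.express_route_circuit_authorizations.list(
#             'my-resource-group', 'my-circuit'):
#         print(auth.name)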
|
{
"content_hash": "179d226fd721b63c276d91396db5fa08",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 180,
"avg_line_length": 47.17836257309941,
"alnum_prop": 0.651688875116207,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "46a69e0b448eadb55950b0d77461cae721d096e8",
"size": "16609",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_09_01/operations/express_route_circuit_authorizations_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
}
|
from flask import Blueprint
admin = Blueprint('admin', __name__)
from . import views
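# Typical registration on the application (a sketch; the import path is
# taken from this record's metadata, the URL prefix is an assumption):
#
#     from flask import Flask
#     from app.admin import admin
#
#     app = Flask(__name__)
#     app.register_blueprint(admin, url_prefix='/admin')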
|
{
"content_hash": "d5cdaf392621d75dbcc13f7f09ef3dad",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 36,
"avg_line_length": 17.4,
"alnum_prop": 0.7126436781609196,
"repo_name": "StarInworld/voluntary",
"id": "66bcf9bafc530b3c3f01213e4a17c570fe6da457",
"size": "178",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/admin/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "181404"
},
{
"name": "HTML",
"bytes": "77463"
},
{
"name": "JavaScript",
"bytes": "32665"
},
{
"name": "Python",
"bytes": "35291"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("contentstore", "0007_auto_20171102_0950"),
("subscriptions", "0007_auto_20171102_0950"),
]
operations = [
migrations.CreateModel(
name="ResendRequest",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("received_at", models.DateTimeField(auto_now_add=True)),
("outbound", models.UUIDField(null=True)),
(
"message",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="resend_requests",
to="contentstore.Message",
),
),
(
"subscription",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="subscriptions.Subscription",
),
),
],
)
]
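# Applying this migration programmatically (equivalent to
# `manage.py migrate subscriptions`), assuming Django settings are
# configured:
#
#     from django.core.management import call_command
#     call_command('migrate', 'subscriptions')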
|
{
"content_hash": "5a63e2bc5b6f9427ee6f20212539f3c3",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 73,
"avg_line_length": 31,
"alnum_prop": 0.4186684969114619,
"repo_name": "praekelt/seed-staged-based-messaging",
"id": "c5235a3fa5c72455c43f60e73e714453ea06ef04",
"size": "1530",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "subscriptions/migrations/0008_resendrequest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "51899"
}
],
"symlink_target": ""
}
|
from flask import redirect, url_for
from flask_login import current_user
from .account import SignIn, signout,\
github_auth, github_auth_grant, github_auth_revoke,\
weibo_auth, weibo_auth_grant, weibo_auth_revoke
from .dashboard import dashboard
from .posts import posts, PostsWrite
from .srm import list_problems, ProblemsWrite
from .images import images, ImagesWrite
def index():
if current_user.is_anonymous():
return redirect(url_for('.signin'))
else:
return redirect(url_for('.dashboard'))
|
{
"content_hash": "8bd7cec5235765065b005331adca778e",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 56,
"avg_line_length": 31.176470588235293,
"alnum_prop": 0.7339622641509433,
"repo_name": "warmsea/warmsea.net",
"id": "d5905e184b64b4bf4bf5cdec6cf7297871e1efa4",
"size": "530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "warmsea/admin/views/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "365666"
},
{
"name": "HTML",
"bytes": "46919"
},
{
"name": "JavaScript",
"bytes": "199157"
},
{
"name": "Python",
"bytes": "69835"
},
{
"name": "Shell",
"bytes": "1490"
},
{
"name": "TypeScript",
"bytes": "4001"
}
],
"symlink_target": ""
}
|
"""
James found a love letter his friend Harry has written for his girlfriend. James is a prankster, so he decides to meddle
with the letter. He changes all the words in the letter into palindromes.
To do this, he follows 2 rules:
(a) He can reduce the value of a letter, e.g. he can change 'd' to 'c', but he cannot change 'c' to 'd'.
(b) In order to form a palindrome, if he has to repeatedly reduce the value of a letter, he can do it until the letter
becomes 'a'. Once a letter has been changed to 'a', it can no longer be changed.
Each reduction in the value of any letter is counted as a single operation. Find the minimum number of operations
required to convert a given string into a palindrome.
Input Format
The first line contains an integer T, i.e., the number of test cases.
The next T lines will contain a string each. The strings do not contain any spaces.
"""
__author__ = 'Danyang'
class Solution(object):
def solve(self, cipher):
"""
scan from both sides, doing operation
:param cipher: the cipher
"""
start_ptr = 0
end_ptr = len(cipher) - 1
cnt = 0
while start_ptr < end_ptr:
ord1 = ord(cipher[start_ptr]) - ord('a')
ord2 = ord(cipher[end_ptr]) - ord('a')
cnt += abs(ord1 - ord2)
start_ptr += 1
end_ptr -= 1
return cnt
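# Quick sanity checks of the cost computation above (added sketch): for
# "abc" only the outer pair differs, costing abs(ord('a') - ord('c')) == 2.
assert Solution().solve("abc") == 2
assert Solution().solve("abcba") == 0  # already a palindrome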
if __name__ == "__main__":
import sys
f = open("1.in", "r")
# f = sys.stdin
testcases = int(f.readline().strip())
for t in xrange(testcases):
# construct cipher
cipher = f.readline().strip()
# solve
s = "%s\n" % (Solution().solve(cipher))
print s,
|
{
"content_hash": "b9d7fc73ce567c7f52200942b5b89ca5",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 120,
"avg_line_length": 30.836363636363636,
"alnum_prop": 0.6179245283018868,
"repo_name": "algorhythms/HackerRankAlgorithms",
"id": "c005d0a854790442ca611a76587f5223ca5bbc66",
"size": "1696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "The Love-Letter Mystery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "5450"
},
{
"name": "Java",
"bytes": "2566"
},
{
"name": "Python",
"bytes": "208997"
}
],
"symlink_target": ""
}
|
import unittest
import io
class TestThreadedTaskDispatcher(unittest.TestCase):
def _makeOne(self):
from waitress.task import ThreadedTaskDispatcher
return ThreadedTaskDispatcher()
def test_handler_thread_task_is_None(self):
inst = self._makeOne()
inst.threads[0] = True
inst.queue.put(None)
inst.handler_thread(0)
self.assertEqual(inst.stop_count, -1)
self.assertEqual(inst.threads, {})
def test_handler_thread_task_raises(self):
from waitress.task import JustTesting
inst = self._makeOne()
inst.threads[0] = True
        inst.logger = DummyLogger()
        task = DummyTask(JustTesting)
inst.queue.put(task)
inst.handler_thread(0)
self.assertEqual(inst.stop_count, -1)
self.assertEqual(inst.threads, {})
self.assertEqual(len(inst.logger.logged), 1)
def test_set_thread_count_increase(self):
inst = self._makeOne()
L = []
inst.start_new_thread = lambda *x: L.append(x)
inst.set_thread_count(1)
self.assertEqual(L, [(inst.handler_thread, (0,))])
def test_set_thread_count_increase_with_existing(self):
inst = self._makeOne()
L = []
inst.threads = {0: 1}
inst.start_new_thread = lambda *x: L.append(x)
inst.set_thread_count(2)
self.assertEqual(L, [(inst.handler_thread, (1,))])
def test_set_thread_count_decrease(self):
inst = self._makeOne()
inst.threads = {'a': 1, 'b': 2}
inst.set_thread_count(1)
self.assertEqual(inst.queue.qsize(), 1)
self.assertEqual(inst.queue.get(), None)
def test_set_thread_count_same(self):
inst = self._makeOne()
L = []
inst.start_new_thread = lambda *x: L.append(x)
inst.threads = {0: 1}
inst.set_thread_count(1)
self.assertEqual(L, [])
def test_add_task(self):
task = DummyTask()
inst = self._makeOne()
inst.add_task(task)
self.assertEqual(inst.queue.qsize(), 1)
self.assertTrue(task.deferred)
def test_add_task_defer_raises(self):
task = DummyTask(ValueError)
inst = self._makeOne()
self.assertRaises(ValueError, inst.add_task, task)
self.assertEqual(inst.queue.qsize(), 0)
self.assertTrue(task.deferred)
self.assertTrue(task.cancelled)
def test_shutdown_one_thread(self):
inst = self._makeOne()
inst.threads[0] = 1
inst.logger = DummyLogger()
task = DummyTask()
inst.queue.put(task)
self.assertEqual(inst.shutdown(timeout=.01), True)
self.assertEqual(inst.logger.logged, ['1 thread(s) still running'])
self.assertEqual(task.cancelled, True)
def test_shutdown_no_threads(self):
inst = self._makeOne()
self.assertEqual(inst.shutdown(timeout=.01), True)
def test_shutdown_no_cancel_pending(self):
inst = self._makeOne()
self.assertEqual(inst.shutdown(cancel_pending=False, timeout=.01),
False)
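# Usage sketch of the dispatcher under test (method names taken from the
# assertions above; illustrative, not part of the original suite):
#
#     from waitress.task import ThreadedTaskDispatcher
#     dispatcher = ThreadedTaskDispatcher()
#     dispatcher.set_thread_count(4)  # start handler threads
#     dispatcher.add_task(task)       # task must provide service/defer/cancel
#     dispatcher.shutdown()           # cancels pending tasks by default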
class TestTask(unittest.TestCase):
def _makeOne(self, channel=None, request=None):
if channel is None:
channel = DummyChannel()
if request is None:
request = DummyParser()
from waitress.task import Task
return Task(channel, request)
def test_ctor_version_not_in_known(self):
request = DummyParser()
request.version = '8.4'
inst = self._makeOne(request=request)
self.assertEqual(inst.version, '1.0')
def test_cancel(self):
inst = self._makeOne()
inst.cancel()
self.assertTrue(inst.close_on_finish)
def test_defer(self):
inst = self._makeOne()
self.assertEqual(inst.defer(), None)
def test_build_response_header_bad_http_version(self):
inst = self._makeOne()
inst.request = DummyParser()
inst.version = '8.4'
self.assertRaises(AssertionError, inst.build_response_header)
def test_build_response_header_v10_keepalive_no_content_length(self):
inst = self._makeOne()
inst.request = DummyParser()
inst.request.headers['CONNECTION'] = 'keep-alive'
inst.version = '1.0'
result = inst.build_response_header()
lines = filter_lines(result)
self.assertEqual(len(lines), 4)
self.assertEqual(lines[0], b'HTTP/1.0 200 OK')
self.assertEqual(lines[1], b'Connection: close')
self.assertTrue(lines[2].startswith(b'Date:'))
self.assertEqual(lines[3], b'Server: waitress')
self.assertEqual(inst.close_on_finish, True)
self.assertTrue(('Connection', 'close') in inst.response_headers)
def test_build_response_header_v10_keepalive_with_content_length(self):
inst = self._makeOne()
inst.request = DummyParser()
inst.request.headers['CONNECTION'] = 'keep-alive'
inst.response_headers = [('Content-Length', '10')]
inst.version = '1.0'
inst.content_length = 0
result = inst.build_response_header()
lines = filter_lines(result)
self.assertEqual(len(lines), 5)
self.assertEqual(lines[0], b'HTTP/1.0 200 OK')
self.assertEqual(lines[1], b'Connection: Keep-Alive')
self.assertEqual(lines[2], b'Content-Length: 10')
self.assertTrue(lines[3].startswith(b'Date:'))
self.assertEqual(lines[4], b'Server: waitress')
self.assertEqual(inst.close_on_finish, False)
def test_build_response_header_v11_connection_closed_by_client(self):
inst = self._makeOne()
inst.request = DummyParser()
inst.version = '1.1'
inst.request.headers['CONNECTION'] = 'close'
result = inst.build_response_header()
lines = filter_lines(result)
self.assertEqual(len(lines), 5)
self.assertEqual(lines[0], b'HTTP/1.1 200 OK')
self.assertEqual(lines[1], b'Connection: close')
self.assertTrue(lines[2].startswith(b'Date:'))
self.assertEqual(lines[3], b'Server: waitress')
self.assertEqual(lines[4], b'Transfer-Encoding: chunked')
self.assertTrue(('Connection', 'close') in inst.response_headers)
self.assertEqual(inst.close_on_finish, True)
def test_build_response_header_v11_connection_keepalive_by_client(self):
inst = self._makeOne()
inst.request = DummyParser()
inst.request.headers['CONNECTION'] = 'keep-alive'
inst.version = '1.1'
result = inst.build_response_header()
lines = filter_lines(result)
self.assertEqual(len(lines), 5)
self.assertEqual(lines[0], b'HTTP/1.1 200 OK')
self.assertEqual(lines[1], b'Connection: close')
self.assertTrue(lines[2].startswith(b'Date:'))
self.assertEqual(lines[3], b'Server: waitress')
self.assertEqual(lines[4], b'Transfer-Encoding: chunked')
self.assertTrue(('Connection', 'close') in inst.response_headers)
self.assertEqual(inst.close_on_finish, True)
def test_build_response_header_v11_200_no_content_length(self):
inst = self._makeOne()
inst.request = DummyParser()
inst.version = '1.1'
result = inst.build_response_header()
lines = filter_lines(result)
self.assertEqual(len(lines), 5)
self.assertEqual(lines[0], b'HTTP/1.1 200 OK')
self.assertEqual(lines[1], b'Connection: close')
self.assertTrue(lines[2].startswith(b'Date:'))
self.assertEqual(lines[3], b'Server: waitress')
self.assertEqual(lines[4], b'Transfer-Encoding: chunked')
self.assertEqual(inst.close_on_finish, True)
self.assertTrue(('Connection', 'close') in inst.response_headers)
def test_build_response_header_via_added(self):
inst = self._makeOne()
inst.request = DummyParser()
inst.version = '1.0'
inst.response_headers = [('Server', 'abc')]
result = inst.build_response_header()
lines = filter_lines(result)
self.assertEqual(len(lines), 5)
self.assertEqual(lines[0], b'HTTP/1.0 200 OK')
self.assertEqual(lines[1], b'Connection: close')
self.assertTrue(lines[2].startswith(b'Date:'))
self.assertEqual(lines[3], b'Server: abc')
self.assertEqual(lines[4], b'Via: waitress')
def test_build_response_header_date_exists(self):
inst = self._makeOne()
inst.request = DummyParser()
inst.version = '1.0'
inst.response_headers = [('Date', 'date')]
result = inst.build_response_header()
lines = filter_lines(result)
self.assertEqual(len(lines), 4)
self.assertEqual(lines[0], b'HTTP/1.0 200 OK')
self.assertEqual(lines[1], b'Connection: close')
self.assertTrue(lines[2].startswith(b'Date:'))
self.assertEqual(lines[3], b'Server: waitress')
def test_build_response_header_preexisting_content_length(self):
inst = self._makeOne()
inst.request = DummyParser()
inst.version = '1.1'
inst.content_length = 100
result = inst.build_response_header()
lines = filter_lines(result)
self.assertEqual(len(lines), 4)
self.assertEqual(lines[0], b'HTTP/1.1 200 OK')
self.assertEqual(lines[1], b'Content-Length: 100')
self.assertTrue(lines[2].startswith(b'Date:'))
self.assertEqual(lines[3], b'Server: waitress')
def test_remove_content_length_header(self):
inst = self._makeOne()
inst.response_headers = [('Content-Length', '70')]
inst.remove_content_length_header()
self.assertEqual(inst.response_headers, [])
def test_start(self):
inst = self._makeOne()
inst.start()
self.assertTrue(inst.start_time)
def test_finish_didnt_write_header(self):
inst = self._makeOne()
inst.wrote_header = False
inst.complete = True
inst.finish()
self.assertTrue(inst.channel.written)
def test_finish_wrote_header(self):
inst = self._makeOne()
inst.wrote_header = True
inst.finish()
self.assertFalse(inst.channel.written)
def test_finish_chunked_response(self):
inst = self._makeOne()
inst.wrote_header = True
inst.chunked_response = True
inst.finish()
self.assertEqual(inst.channel.written, b'0\r\n\r\n')
def test_write_wrote_header(self):
inst = self._makeOne()
inst.wrote_header = True
inst.complete = True
inst.content_length = 3
inst.write(b'abc')
self.assertEqual(inst.channel.written, b'abc')
def test_write_header_not_written(self):
inst = self._makeOne()
inst.wrote_header = False
inst.complete = True
inst.write(b'abc')
self.assertTrue(inst.channel.written)
self.assertEqual(inst.wrote_header, True)
def test_write_start_response_uncalled(self):
inst = self._makeOne()
self.assertRaises(RuntimeError, inst.write, b'')
def test_write_chunked_response(self):
inst = self._makeOne()
inst.wrote_header = True
inst.chunked_response = True
inst.complete = True
inst.write(b'abc')
self.assertEqual(inst.channel.written, b'3\r\nabc\r\n')
def test_write_preexisting_content_length(self):
inst = self._makeOne()
inst.wrote_header = True
inst.complete = True
inst.content_length = 1
inst.logger = DummyLogger()
inst.write(b'abc')
self.assertTrue(inst.channel.written)
self.assertEqual(inst.logged_write_excess, True)
self.assertEqual(len(inst.logger.logged), 1)
class TestWSGITask(unittest.TestCase):
def _makeOne(self, channel=None, request=None):
if channel is None:
channel = DummyChannel()
if request is None:
request = DummyParser()
from waitress.task import WSGITask
return WSGITask(channel, request)
def test_service(self):
inst = self._makeOne()
def execute():
inst.executed = True
inst.execute = execute
inst.complete = True
inst.service()
self.assertTrue(inst.start_time)
self.assertTrue(inst.close_on_finish)
self.assertTrue(inst.channel.written)
self.assertEqual(inst.executed, True)
def test_service_server_raises_socket_error(self):
import socket
inst = self._makeOne()
def execute():
raise socket.error
inst.execute = execute
self.assertRaises(socket.error, inst.service)
self.assertTrue(inst.start_time)
self.assertTrue(inst.close_on_finish)
self.assertFalse(inst.channel.written)
def test_execute_app_calls_start_response_twice_wo_exc_info(self):
def app(environ, start_response):
start_response('200 OK', [])
start_response('200 OK', [])
inst = self._makeOne()
inst.channel.server.application = app
self.assertRaises(AssertionError, inst.execute)
def test_execute_app_calls_start_response_w_exc_info_complete(self):
def app(environ, start_response):
start_response('200 OK', [], [ValueError, ValueError(), None])
return [b'a']
inst = self._makeOne()
inst.complete = True
inst.channel.server.application = app
inst.execute()
self.assertTrue(inst.complete)
self.assertEqual(inst.status, '200 OK')
self.assertTrue(inst.channel.written)
def test_execute_app_calls_start_response_w_excinf_headers_unwritten(self):
def app(environ, start_response):
start_response('200 OK', [], [ValueError, None, None])
return [b'a']
inst = self._makeOne()
inst.wrote_header = False
inst.channel.server.application = app
inst.response_headers = [('a', 'b')]
inst.execute()
self.assertTrue(inst.complete)
self.assertEqual(inst.status, '200 OK')
self.assertTrue(inst.channel.written)
        self.assertFalse(('a', 'b') in inst.response_headers)
def test_execute_app_calls_start_response_w_excinf_headers_written(self):
def app(environ, start_response):
start_response('200 OK', [], [ValueError, ValueError(), None])
inst = self._makeOne()
inst.complete = True
inst.wrote_header = True
inst.channel.server.application = app
self.assertRaises(ValueError, inst.execute)
def test_execute_bad_header_key(self):
def app(environ, start_response):
start_response('200 OK', [(None, 'a')])
inst = self._makeOne()
inst.channel.server.application = app
self.assertRaises(AssertionError, inst.execute)
def test_execute_bad_header_value(self):
def app(environ, start_response):
start_response('200 OK', [('a', None)])
inst = self._makeOne()
inst.channel.server.application = app
self.assertRaises(AssertionError, inst.execute)
def test_execute_hopbyhop_header(self):
def app(environ, start_response):
start_response('200 OK', [('Connection', 'close')])
inst = self._makeOne()
inst.channel.server.application = app
self.assertRaises(AssertionError, inst.execute)
def test_execute_bad_header_value_control_characters(self):
def app(environ, start_response):
start_response('200 OK', [('a', '\n')])
inst = self._makeOne()
inst.channel.server.application = app
self.assertRaises(ValueError, inst.execute)
def test_execute_bad_header_name_control_characters(self):
def app(environ, start_response):
start_response('200 OK', [('a\r', 'value')])
inst = self._makeOne()
inst.channel.server.application = app
self.assertRaises(ValueError, inst.execute)
def test_execute_bad_status_control_characters(self):
def app(environ, start_response):
start_response('200 OK\r', [])
inst = self._makeOne()
inst.channel.server.application = app
self.assertRaises(ValueError, inst.execute)
def test_preserve_header_value_order(self):
def app(environ, start_response):
write = start_response('200 OK', [('C', 'b'), ('A', 'b'), ('A', 'a')])
write(b'abc')
return []
inst = self._makeOne()
inst.channel.server.application = app
inst.execute()
self.assertTrue(b'A: b\r\nA: a\r\nC: b\r\n' in inst.channel.written)
def test_execute_bad_status_value(self):
def app(environ, start_response):
start_response(None, [])
inst = self._makeOne()
inst.channel.server.application = app
self.assertRaises(AssertionError, inst.execute)
def test_execute_with_content_length_header(self):
def app(environ, start_response):
start_response('200 OK', [('Content-Length', '1')])
return [b'a']
inst = self._makeOne()
inst.channel.server.application = app
inst.execute()
self.assertEqual(inst.content_length, 1)
def test_execute_app_calls_write(self):
def app(environ, start_response):
write = start_response('200 OK', [('Content-Length', '3')])
write(b'abc')
return []
inst = self._makeOne()
inst.channel.server.application = app
inst.execute()
self.assertEqual(inst.channel.written[-3:], b'abc')
def test_execute_app_returns_len1_chunk_without_cl(self):
def app(environ, start_response):
start_response('200 OK', [])
return [b'abc']
inst = self._makeOne()
inst.channel.server.application = app
inst.execute()
self.assertEqual(inst.content_length, 3)
def test_execute_app_returns_empty_chunk_as_first(self):
def app(environ, start_response):
start_response('200 OK', [])
return ['', b'abc']
inst = self._makeOne()
inst.channel.server.application = app
inst.execute()
self.assertEqual(inst.content_length, None)
def test_execute_app_returns_too_many_bytes(self):
def app(environ, start_response):
start_response('200 OK', [('Content-Length', '1')])
return [b'abc']
inst = self._makeOne()
inst.channel.server.application = app
inst.logger = DummyLogger()
inst.execute()
self.assertEqual(inst.close_on_finish, True)
self.assertEqual(len(inst.logger.logged), 1)
def test_execute_app_returns_too_few_bytes(self):
def app(environ, start_response):
start_response('200 OK', [('Content-Length', '3')])
return [b'a']
inst = self._makeOne()
inst.channel.server.application = app
inst.logger = DummyLogger()
inst.execute()
self.assertEqual(inst.close_on_finish, True)
self.assertEqual(len(inst.logger.logged), 1)
def test_execute_app_do_not_warn_on_head(self):
def app(environ, start_response):
start_response('200 OK', [('Content-Length', '3')])
return [b'']
inst = self._makeOne()
inst.request.command = 'HEAD'
inst.channel.server.application = app
inst.logger = DummyLogger()
inst.execute()
self.assertEqual(inst.close_on_finish, True)
self.assertEqual(len(inst.logger.logged), 0)
def test_execute_app_returns_closeable(self):
class closeable(list):
def close(self):
self.closed = True
foo = closeable([b'abc'])
def app(environ, start_response):
start_response('200 OK', [('Content-Length', '3')])
return foo
inst = self._makeOne()
inst.channel.server.application = app
inst.execute()
self.assertEqual(foo.closed, True)
def test_execute_app_returns_filewrapper_prepare_returns_True(self):
from waitress.buffers import ReadOnlyFileBasedBuffer
f = io.BytesIO(b'abc')
app_iter = ReadOnlyFileBasedBuffer(f, 8192)
def app(environ, start_response):
start_response('200 OK', [('Content-Length', '3')])
return app_iter
inst = self._makeOne()
inst.channel.server.application = app
inst.execute()
self.assertTrue(inst.channel.written) # header
self.assertEqual(inst.channel.otherdata, [app_iter])
def test_execute_app_returns_filewrapper_prepare_returns_True_nocl(self):
from waitress.buffers import ReadOnlyFileBasedBuffer
f = io.BytesIO(b'abc')
app_iter = ReadOnlyFileBasedBuffer(f, 8192)
def app(environ, start_response):
start_response('200 OK', [])
return app_iter
inst = self._makeOne()
inst.channel.server.application = app
inst.execute()
self.assertTrue(inst.channel.written) # header
self.assertEqual(inst.channel.otherdata, [app_iter])
self.assertEqual(inst.content_length, 3)
def test_execute_app_returns_filewrapper_prepare_returns_True_badcl(self):
from waitress.buffers import ReadOnlyFileBasedBuffer
f = io.BytesIO(b'abc')
app_iter = ReadOnlyFileBasedBuffer(f, 8192)
def app(environ, start_response):
start_response('200 OK', [])
return app_iter
inst = self._makeOne()
inst.channel.server.application = app
inst.content_length = 10
inst.response_headers = [('Content-Length', '10')]
inst.execute()
self.assertTrue(inst.channel.written) # header
self.assertEqual(inst.channel.otherdata, [app_iter])
self.assertEqual(inst.content_length, 3)
self.assertEqual(dict(inst.response_headers)['Content-Length'], '3')
def test_get_environment_already_cached(self):
inst = self._makeOne()
inst.environ = object()
self.assertEqual(inst.get_environment(), inst.environ)
def test_get_environment_path_startswith_more_than_one_slash(self):
inst = self._makeOne()
request = DummyParser()
request.path = '///abc'
inst.request = request
environ = inst.get_environment()
self.assertEqual(environ['PATH_INFO'], '/abc')
def test_get_environment_path_empty(self):
inst = self._makeOne()
request = DummyParser()
request.path = ''
inst.request = request
environ = inst.get_environment()
self.assertEqual(environ['PATH_INFO'], '')
def test_get_environment_no_query(self):
inst = self._makeOne()
request = DummyParser()
inst.request = request
environ = inst.get_environment()
self.assertEqual(environ['QUERY_STRING'], '')
def test_get_environment_with_query(self):
inst = self._makeOne()
request = DummyParser()
request.query = 'abc'
inst.request = request
environ = inst.get_environment()
self.assertEqual(environ['QUERY_STRING'], 'abc')
def test_get_environ_with_url_prefix_miss(self):
inst = self._makeOne()
inst.channel.server.adj.url_prefix = '/foo'
request = DummyParser()
request.path = '/bar'
inst.request = request
environ = inst.get_environment()
self.assertEqual(environ['PATH_INFO'], '/bar')
self.assertEqual(environ['SCRIPT_NAME'], '/foo')
def test_get_environ_with_url_prefix_hit(self):
inst = self._makeOne()
inst.channel.server.adj.url_prefix = '/foo'
request = DummyParser()
request.path = '/foo/fuz'
inst.request = request
environ = inst.get_environment()
self.assertEqual(environ['PATH_INFO'], '/fuz')
self.assertEqual(environ['SCRIPT_NAME'], '/foo')
def test_get_environ_with_url_prefix_empty_path(self):
inst = self._makeOne()
inst.channel.server.adj.url_prefix = '/foo'
request = DummyParser()
request.path = '/foo'
inst.request = request
environ = inst.get_environment()
self.assertEqual(environ['PATH_INFO'], '')
self.assertEqual(environ['SCRIPT_NAME'], '/foo')
def test_get_environment_values(self):
import sys
inst = self._makeOne()
request = DummyParser()
request.headers = {
'CONTENT_TYPE': 'abc',
'CONTENT_LENGTH': '10',
'X_FOO': 'BAR',
'CONNECTION': 'close',
}
request.query = 'abc'
inst.request = request
environ = inst.get_environment()
# nail the keys of environ
self.assertEqual(sorted(environ.keys()), [
'CONTENT_LENGTH', 'CONTENT_TYPE', 'HTTP_CONNECTION', 'HTTP_X_FOO',
'PATH_INFO', 'QUERY_STRING', 'REMOTE_ADDR', 'REQUEST_METHOD',
'SCRIPT_NAME', 'SERVER_NAME', 'SERVER_PORT', 'SERVER_PROTOCOL',
'SERVER_SOFTWARE', 'wsgi.errors', 'wsgi.file_wrapper', 'wsgi.input',
'wsgi.multiprocess', 'wsgi.multithread', 'wsgi.run_once',
'wsgi.url_scheme', 'wsgi.version'])
self.assertEqual(environ['REQUEST_METHOD'], 'GET')
self.assertEqual(environ['SERVER_PORT'], '80')
self.assertEqual(environ['SERVER_NAME'], 'localhost')
self.assertEqual(environ['SERVER_SOFTWARE'], 'waitress')
self.assertEqual(environ['SERVER_PROTOCOL'], 'HTTP/1.0')
self.assertEqual(environ['SCRIPT_NAME'], '')
self.assertEqual(environ['HTTP_CONNECTION'], 'close')
self.assertEqual(environ['PATH_INFO'], '/')
self.assertEqual(environ['QUERY_STRING'], 'abc')
self.assertEqual(environ['REMOTE_ADDR'], '127.0.0.1')
self.assertEqual(environ['CONTENT_TYPE'], 'abc')
self.assertEqual(environ['CONTENT_LENGTH'], '10')
self.assertEqual(environ['HTTP_X_FOO'], 'BAR')
self.assertEqual(environ['wsgi.version'], (1, 0))
self.assertEqual(environ['wsgi.url_scheme'], 'http')
self.assertEqual(environ['wsgi.errors'], sys.stderr)
self.assertEqual(environ['wsgi.multithread'], True)
self.assertEqual(environ['wsgi.multiprocess'], False)
self.assertEqual(environ['wsgi.run_once'], False)
self.assertEqual(environ['wsgi.input'], 'stream')
self.assertEqual(inst.environ, environ)
def test_get_environment_values_w_scheme_override_untrusted(self):
inst = self._makeOne()
request = DummyParser()
request.headers = {
'CONTENT_TYPE': 'abc',
'CONTENT_LENGTH': '10',
'X_FOO': 'BAR',
'X_FORWARDED_PROTO': 'https',
'CONNECTION': 'close',
}
request.query = 'abc'
inst.request = request
environ = inst.get_environment()
self.assertEqual(environ['wsgi.url_scheme'], 'http')
def test_get_environment_values_w_scheme_override_trusted(self):
import sys
inst = self._makeOne()
inst.channel.addr = ['192.168.1.1']
inst.channel.server.adj.trusted_proxy = '192.168.1.1'
request = DummyParser()
request.headers = {
'CONTENT_TYPE': 'abc',
'CONTENT_LENGTH': '10',
'X_FOO': 'BAR',
'X_FORWARDED_PROTO': 'https',
'CONNECTION': 'close',
}
request.query = 'abc'
inst.request = request
environ = inst.get_environment()
# nail the keys of environ
self.assertEqual(sorted(environ.keys()), [
'CONTENT_LENGTH', 'CONTENT_TYPE', 'HTTP_CONNECTION', 'HTTP_X_FOO',
'PATH_INFO', 'QUERY_STRING', 'REMOTE_ADDR', 'REQUEST_METHOD',
'SCRIPT_NAME', 'SERVER_NAME', 'SERVER_PORT', 'SERVER_PROTOCOL',
'SERVER_SOFTWARE', 'wsgi.errors', 'wsgi.file_wrapper', 'wsgi.input',
'wsgi.multiprocess', 'wsgi.multithread', 'wsgi.run_once',
'wsgi.url_scheme', 'wsgi.version'])
self.assertEqual(environ['REQUEST_METHOD'], 'GET')
self.assertEqual(environ['SERVER_PORT'], '80')
self.assertEqual(environ['SERVER_NAME'], 'localhost')
self.assertEqual(environ['SERVER_SOFTWARE'], 'waitress')
self.assertEqual(environ['SERVER_PROTOCOL'], 'HTTP/1.0')
self.assertEqual(environ['SCRIPT_NAME'], '')
self.assertEqual(environ['HTTP_CONNECTION'], 'close')
self.assertEqual(environ['PATH_INFO'], '/')
self.assertEqual(environ['QUERY_STRING'], 'abc')
self.assertEqual(environ['REMOTE_ADDR'], '192.168.1.1')
self.assertEqual(environ['CONTENT_TYPE'], 'abc')
self.assertEqual(environ['CONTENT_LENGTH'], '10')
self.assertEqual(environ['HTTP_X_FOO'], 'BAR')
self.assertEqual(environ['wsgi.version'], (1, 0))
self.assertEqual(environ['wsgi.url_scheme'], 'https')
self.assertEqual(environ['wsgi.errors'], sys.stderr)
self.assertEqual(environ['wsgi.multithread'], True)
self.assertEqual(environ['wsgi.multiprocess'], False)
self.assertEqual(environ['wsgi.run_once'], False)
self.assertEqual(environ['wsgi.input'], 'stream')
self.assertEqual(inst.environ, environ)
def test_get_environment_values_w_bogus_scheme_override(self):
inst = self._makeOne()
inst.channel.addr = ['192.168.1.1']
inst.channel.server.adj.trusted_proxy = '192.168.1.1'
request = DummyParser()
request.headers = {
'CONTENT_TYPE': 'abc',
'CONTENT_LENGTH': '10',
'X_FOO': 'BAR',
'X_FORWARDED_PROTO': 'http://p02n3e.com?url=http',
'CONNECTION': 'close',
}
request.query = 'abc'
inst.request = request
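        # A forwarded scheme containing URL metacharacters must be rejected
        # even though the proxy itself is trusted.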
self.assertRaises(ValueError, inst.get_environment)
class TestErrorTask(unittest.TestCase):
def _makeOne(self, channel=None, request=None):
if channel is None:
channel = DummyChannel()
if request is None:
request = DummyParser()
request.error = DummyError()
from waitress.task import ErrorTask
return ErrorTask(channel, request)
def test_execute_http_10(self):
inst = self._makeOne()
inst.execute()
lines = filter_lines(inst.channel.written)
self.assertEqual(len(lines), 9)
self.assertEqual(lines[0], b'HTTP/1.0 432 Too Ugly')
self.assertEqual(lines[1], b'Connection: close')
self.assertEqual(lines[2], b'Content-Length: 43')
self.assertEqual(lines[3], b'Content-Type: text/plain')
self.assertTrue(lines[4])
self.assertEqual(lines[5], b'Server: waitress')
self.assertEqual(lines[6], b'Too Ugly')
self.assertEqual(lines[7], b'body')
self.assertEqual(lines[8], b'(generated by waitress)')
def test_execute_http_11(self):
inst = self._makeOne()
inst.version = '1.1'
inst.execute()
lines = filter_lines(inst.channel.written)
self.assertEqual(len(lines), 8)
self.assertEqual(lines[0], b'HTTP/1.1 432 Too Ugly')
self.assertEqual(lines[1], b'Content-Length: 43')
self.assertEqual(lines[2], b'Content-Type: text/plain')
self.assertTrue(lines[3])
self.assertEqual(lines[4], b'Server: waitress')
self.assertEqual(lines[5], b'Too Ugly')
self.assertEqual(lines[6], b'body')
self.assertEqual(lines[7], b'(generated by waitress)')
def test_execute_http_11_close(self):
inst = self._makeOne()
inst.version = '1.1'
inst.request.headers['CONNECTION'] = 'close'
inst.execute()
lines = filter_lines(inst.channel.written)
self.assertEqual(len(lines), 9)
self.assertEqual(lines[0], b'HTTP/1.1 432 Too Ugly')
self.assertEqual(lines[1], b'Connection: close')
self.assertEqual(lines[2], b'Content-Length: 43')
self.assertEqual(lines[3], b'Content-Type: text/plain')
self.assertTrue(lines[4])
self.assertEqual(lines[5], b'Server: waitress')
self.assertEqual(lines[6], b'Too Ugly')
self.assertEqual(lines[7], b'body')
self.assertEqual(lines[8], b'(generated by waitress)')
def test_execute_http_11_keep(self):
inst = self._makeOne()
inst.version = '1.1'
inst.request.headers['CONNECTION'] = 'keep-alive'
inst.execute()
lines = filter_lines(inst.channel.written)
self.assertEqual(len(lines), 8)
self.assertEqual(lines[0], b'HTTP/1.1 432 Too Ugly')
self.assertEqual(lines[1], b'Content-Length: 43')
self.assertEqual(lines[2], b'Content-Type: text/plain')
self.assertTrue(lines[3])
self.assertEqual(lines[4], b'Server: waitress')
self.assertEqual(lines[5], b'Too Ugly')
self.assertEqual(lines[6], b'body')
self.assertEqual(lines[7], b'(generated by waitress)')
class DummyError(object):
code = '432'
reason = 'Too Ugly'
body = 'body'
class DummyTask(object):
serviced = False
deferred = False
cancelled = False
def __init__(self, toraise=None):
self.toraise = toraise
def service(self):
self.serviced = True
if self.toraise:
raise self.toraise
def defer(self):
self.deferred = True
if self.toraise:
raise self.toraise
def cancel(self):
self.cancelled = True
class DummyAdj(object):
log_socket_errors = True
ident = 'waitress'
host = '127.0.0.1'
port = 80
url_prefix = ''
trusted_proxy = None
class DummyServer(object):
server_name = 'localhost'
effective_port = 80
def __init__(self):
self.adj = DummyAdj()
class DummyChannel(object):
closed_when_done = False
adj = DummyAdj()
creation_time = 0
addr = ['127.0.0.1']
def __init__(self, server=None):
if server is None:
server = DummyServer()
self.server = server
self.written = b''
self.otherdata = []
def write_soon(self, data):
if isinstance(data, bytes):
self.written += data
else:
self.otherdata.append(data)
return len(data)
class DummyParser(object):
version = '1.0'
command = 'GET'
path = '/'
query = ''
url_scheme = 'http'
expect_continue = False
headers_finished = False
def __init__(self):
self.headers = {}
def get_body_stream(self):
return 'stream'
def filter_lines(s):
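    """Split bytes written to the channel on CRLF, dropping empty strings
    (such as the blank separator line between headers and body)."""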
return list(filter(None, s.split(b'\r\n')))
class DummyLogger(object):
def __init__(self):
self.logged = []
def warning(self, msg):
self.logged.append(msg)
def exception(self, msg):
self.logged.append(msg)
|
{
"content_hash": "0c333945a1b15695f00d0c960cbc8410",
"timestamp": "",
"source": "github",
"line_count": 931,
"max_line_length": 82,
"avg_line_length": 37.578947368421055,
"alnum_prop": 0.6047561881895616,
"repo_name": "stefanv/aandete",
"id": "2a2759a23a041940465c264f9e31538b68ebd1e0",
"size": "34986",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "app/lib/waitress/tests/test_task.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "248684"
},
{
"name": "Python",
"bytes": "6478502"
}
],
"symlink_target": ""
}
|
import os
import shutil
import yaml
import glob
from momo.utils import run_cmd, mkdir_p, utf8_encode, txt_type, eval_path
from momo.plugins.base import Plugin
BASE_CONFIG_NAME = '__base__'
class Mkdocs(Plugin):
mkdocs_configs = {
'theme': 'readthedocs',
}
momo_configs = {
'momo_root_name': 'Home',
'momo_page_level': 1,
'momo_attr_table': True,
'momo_attr_css': True,
'momo_docs_dir': None,
'momo_docs_pathname': 'docs',
        'momo_control_attr': False,  # whether to render control attributes
}
def setup(self):
self.root = self.settings.bucket.root
bucket_name = self.settings.bucket.name
base_configs = self.settings.plugins.get(
'mkdocs', {}).get(BASE_CONFIG_NAME, {})
configs = self.settings.plugins.get(
'mkdocs', {}).get(bucket_name, {})
for k in base_configs:
if k not in configs:
configs[k] = base_configs[k]
# mkdocs site_name defaults to bucket name
self.mkdocs_configs['site_name'] = bucket_name
for k in configs:
if not k.startswith('momo_'):
self.mkdocs_configs[k] = configs[k]
for k in configs:
if k.startswith('momo_'):
self.momo_configs[k] = configs[k]
self.mkdocs_root_dir = os.path.join(self.settings.settings_dir,
'mkdocs')
self.mkdocs_dir = os.path.join(self.mkdocs_root_dir,
bucket_name)
self.docs_dir = os.path.join(self.mkdocs_dir, 'docs')
self.site_dir = os.path.join(self.mkdocs_dir, 'site')
if os.path.exists(self.docs_dir):
shutil.rmtree(self.docs_dir)
mkdir_p(self.docs_dir)
mkdir_p(self.site_dir)
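        # Link any extra assets that live beside mkdocs.yml into docs/ so
        # mkdocs serves them, skipping the reserved entries below.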
assets = glob.glob(os.path.join(self.mkdocs_dir, '*'))
for asset in assets:
filename = os.path.basename(asset)
if filename not in set(['docs', 'site', 'mkdocs.yml']):
os.symlink(asset, os.path.join(self.docs_dir, filename))
self.root.name = self.momo_configs['momo_root_name']
def _get_pages(self, root, level=0):
if level == self.momo_configs['momo_page_level']:
filename = self._make_page(root)
return filename
else:
pages = [
{'Index': self._make_index_page(root, level + 1)}
]
pages += [
{elem.name: self._get_pages(elem, level + 1)}
for elem in root.node_svals
]
return pages
def _get_docs(self):
if self.momo_configs['momo_docs_dir'] is None:
return []
src_momo_docs_dir = eval_path(self.momo_configs['momo_docs_dir'])
if os.path.isdir(src_momo_docs_dir):
markdown_paths = glob.glob(
os.path.join(src_momo_docs_dir, '*.md'))
momo_docs_dir = os.path.join(
self.docs_dir, self.momo_configs['momo_docs_pathname'])
mkdir_p(momo_docs_dir)
docs = []
for markdown_path in markdown_paths:
shutil.copy(markdown_path, momo_docs_dir)
markdown_basename = os.path.basename(markdown_path)
doc_title = os.path.splitext(markdown_basename)[0].title()
doc_path = os.path.join(
self.momo_configs['momo_docs_pathname'], markdown_basename
)
docs.append({doc_title: doc_path})
return [{'Docs': docs}]
def _get_lazy_load_size(self, elem):
"""Get lazy load size for attributes of the current element's nodes."""
attr = elem.attrs.get('__lazy_load_size')
lazy_load_size = attr.content if attr is not None else None
if lazy_load_size is not None:
# parse size in WIDTHxHEIGHT format (px)
try:
width, height = map(int, lazy_load_size.split('x'))
except ValueError:
                raise Exception(
                    'Invalid "__lazy_load_size" value %s' % lazy_load_size)
return (width, height)
return lazy_load_size
def _get_this_lazy_load_size(self, elem):
"""Get lazy load size for current element's attributes."""
attr = elem.attrs.get('__this_lazy_load_size')
this_lazy_load_size = attr.content if attr is not None else None
if this_lazy_load_size is not None:
# parse size in WIDTHxHEIGHT format (px)
try:
width, height = map(int, this_lazy_load_size.split('x'))
except ValueError:
                raise Exception(
                    'Invalid "__this_lazy_load_size" value %s' %
                    this_lazy_load_size)
return (width, height)
return this_lazy_load_size
def _make_page(self, elem):
res = '%s.md' % os.path.join(*elem.path)
filename = os.path.join(self.docs_dir, res)
dirname = os.path.dirname(filename)
if dirname:
mkdir_p(dirname)
kwargs = {}
this_kwargs = {}
lazy_load_size = self._get_lazy_load_size(elem)
this_lazy_load_size = self._get_this_lazy_load_size(elem)
if lazy_load_size is not None:
kwargs['lazy_load_size'] = lazy_load_size
if this_lazy_load_size is not None:
this_kwargs['lazy_load_size'] = this_lazy_load_size
buf = []
with open(filename, 'w') as f:
buf.append(self._make_title(elem))
buf.append(self._make_attrs(elem, **this_kwargs))
buf.append(self._make_nodes(elem, **kwargs))
f.write(utf8_encode('\n'.join(buf)))
return res
def _make_index_page(self, elem, level):
base = os.path.join(*elem.path) if elem.path else ''
res = os.path.join(base, 'index.md')
filename = os.path.join(self.docs_dir, res)
dirname = os.path.dirname(filename)
if dirname:
mkdir_p(dirname)
kwargs = {}
lazy_load_size = self._get_lazy_load_size(elem)
if lazy_load_size is not None:
kwargs['lazy_load_size'] = lazy_load_size
buf = []
with open(filename, 'w') as f:
buf.append(self._make_title(elem))
buf.append(self._make_attrs(elem))
buf.append(self._make_nodes(elem, index=True, level=level,
**kwargs))
f.write(utf8_encode('\n'.join(buf)))
return res
def _make_title(self, elem):
return '# %s' % elem.name
def _filter_control_attrs(self, attrs):
if not self.momo_configs['momo_control_attr']:
return filter(lambda x: not x.name.startswith('__'), attrs)
return attrs
def _make_attrs(self, elem, **kwargs):
buf = []
if self.momo_configs['momo_attr_css']:
name_fmt = ('<span class="momo-attr-name '
'momo-attr-name-{name}">'
'{name}</span>')
else:
name_fmt = '{name}'
if self.momo_configs['momo_attr_css']:
content_fmt = ('<span class="momo-attr-content '
'momo-attr-content-{name}">'
'{content}</span>')
else:
content_fmt = '{content}'
if self.momo_configs['momo_attr_table']:
if elem.attr_svals:
buf.append('')
buf.append('|')
buf.append('- | -')
for attr in self._filter_control_attrs(elem.attr_svals):
buf.append(
txt_type(name_fmt + ' | ' + content_fmt).format(
name=attr.name,
content=self._make_attr_content(attr, **kwargs).strip()
)
)
buf.append('')
        else:
            for attr in self._filter_control_attrs(elem.attr_svals):
                buf.append('\n')
                buf.append(
                    txt_type('- ' + name_fmt + ':' + content_fmt).format(
                        name=attr.name,
                        content=self._make_attr_content(attr, **kwargs).strip()
                    )
                )
return '\n'.join(buf)
def _make_attr_content(self, attr, **kwargs):
buf = []
if attr.has_items:
buf.append('\n')
for i, item in enumerate(attr.content, start=1):
if self.momo_configs['momo_attr_table']:
buf.append(self._make_link(item, **kwargs))
else:
buf.append(' - %s' % (self._make_link(item, **kwargs)))
else:
buf.append(' %s' % self._make_object(attr, **kwargs))
if self.momo_configs['momo_attr_table']:
if buf and buf[0] == '\n':
buf.pop(0)
return '<br>'.join(buf)
else:
return '\n'.join(buf)
    def _make_object(self, attr, **kwargs):
        name = attr.name
        content = attr.content
        if name.lower() in ('url', 'link'):
            return self._make_link(content, **kwargs)
        elif name.lower() == 'image':
            return self._make_image(content, **kwargs)
        else:
            return self._make_link(content, **kwargs)
def _make_link(self, content, **kwargs):
if isinstance(content, txt_type) and content.startswith('http'):
content = '[%s](%s)' % (content, content)
return content
def _make_image(self, content, **kwargs):
res = '\n\n'
if isinstance(content, txt_type) and content.startswith('http'):
if 'lazy_load_size' in kwargs:
width, height = kwargs['lazy_load_size']
img = (
'<a href="{image}" title="image">'
'<img class="lazy" '
'data-original="{image}" '
'width="{width}px" '
'height="{height}px" '
'/></a>'
).format(image=content, width=width, height=height)
res += img
else:
res += '[]({image} "image")'.format(
image=content)
return res
def _make_nodes(self, elem, index=False, level=None, **kwargs):
buf = []
if not index:
for node in elem.node_svals:
this_kwargs = dict(kwargs) # get a fresh copy for each node
buf.append('## %s' % (node.name))
this_lazy_load_size = self._get_this_lazy_load_size(node)
if this_lazy_load_size is not None:
this_kwargs['lazy_load_size'] = this_lazy_load_size
buf.append(self._make_attrs(node, **this_kwargs))
else:
buf.append('### Nodes')
for node in elem.node_svals:
if level == self.momo_configs['momo_page_level']:
buf.append('- [%s](%s.md)' % (node.name, node.name))
else:
buf.append('- [%s](%s/index.md)' % (node.name, node.name))
return '\n'.join(buf)
def _make_mkdocs_yml(self):
mkdocs_yml = os.path.join(self.mkdocs_dir, 'mkdocs.yml')
with open(mkdocs_yml, 'w') as f:
yaml.dump(self.mkdocs_configs, f, default_flow_style=False,
allow_unicode=True)
def _serve(self, args=None):
os.chdir(self.mkdocs_dir)
cmd = 'mkdocs'
cmd_args = []
if not args:
cmd_args = ['serve']
else:
cmd_args.extend(args)
run_cmd(cmd=cmd, cmd_args=cmd_args)
def run(self, args=None):
pages = self._get_pages(self.root)
docs = self._get_docs()
self.mkdocs_configs['pages'] = pages + docs
self._make_mkdocs_yml()
self._serve(args)
plugin = Mkdocs()
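# Illustrative settings layout consumed by setup() above (the bucket name
# "notes" and its values are hypothetical): per-bucket options override
# ``__base__`` options, ``momo_``-prefixed keys configure this plugin, and
# everything else lands in mkdocs.yml.
#
#   plugins:
#     mkdocs:
#       __base__:
#         theme: readthedocs
#       notes:
#         momo_page_level: 2
#         site_name: My Notes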
|
{
"content_hash": "074b04463b03e0011c694fe1d12dba56",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 79,
"avg_line_length": 38.49683544303797,
"alnum_prop": 0.5071927661323469,
"repo_name": "shichao-an/momo",
"id": "c19d73a1ac5887d0b3c645920b673a668f3a1f63",
"size": "12165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "momo/plugins/mkdocs.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2415"
},
{
"name": "HTML",
"bytes": "12629"
},
{
"name": "Python",
"bytes": "103821"
}
],
"symlink_target": ""
}
|
"""
Copyright 2022 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import sheets_update_values
from base_test import BaseTest
class TestUpdateValues(BaseTest):
    """Unit test for the Sheets update_values snippet."""
    def test_update_values(self):
        """Test update_values."""
spreadsheet_id = self.create_test_spreadsheet()
result = sheets_update_values.update_values(spreadsheet_id,
'A1:B2', 'USER_ENTERED', [
['A', 'B'],
['C', 'D']
])
self.assertIsNotNone(result)
self.assertEqual(2, result.get('updatedRows'))
self.assertEqual(2, result.get('updatedColumns'))
self.assertEqual(4, result.get('updatedCells'))
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "d10c1c44ceb32fb2c0946891d3cee2bd",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 78,
"avg_line_length": 37.8421052631579,
"alnum_prop": 0.605702364394993,
"repo_name": "gsuitedevs/python-samples",
"id": "82efa71dfc004279a4e48f5074cefba15ce608fa",
"size": "1438",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "sheets/snippets/test_sheets_update_values.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "401984"
}
],
"symlink_target": ""
}
|
"""
Definitions of standard actions
"""
from abc import abstractmethod
from obviousli.defs import State, Action, Truth
class GiveUpAction(Action):
"""
Do a single thing: give up.
"""
def __init__(self):
super(GiveUpAction, self).__init__(None)
def __lt__(self, other):
return isinstance(other, GiveUpAction)
def __call__(self, state):
return state.replace(source=state.target, truth=Truth.NEUTRAL, previous_state_action=(state, self))
def __str__(self):
return "GiveUp()"
class ActionTemplate(object):
"""
Can be applied on a state to generate actions.
"""
@abstractmethod
def generate(self, state):
"""
Generate all possible applications of this template to the state.
"""
return ()
class ActionGenerator(object):
"""
The action generator is responsible for generating all possible
actions given resources.
"""
def __init__(self, templates, fixed_actions=None):
#: A sequence of action templates
self._templates = templates
self._fixed_actions = fixed_actions
def __call__(self, state):
for template in self._templates:
yield from template.generate(state)
if self._fixed_actions:
yield from self._fixed_actions
# TODO(chaganty): implement the following actions.
# - Guess
# TODO(chaganty): allow the gradient to pass into the Guess action.
# - LexicalParaphrase
class LexicalParaphrase(Action):
"""
Replace a lexical sequence.
"""
def __init__(self, input_, output, apply_on_source=True, match_idx=None):
super(LexicalParaphrase, self).__init__(None)
self.input = input_
self.output = output
self.apply_on_source = apply_on_source
self.match_idx = match_idx
def __str__(self):
return "{}:{}@{} -> {}".format("S" if self.apply_on_source else "T", self.input, self.match_idx, self.output)
def __lt__(self, other):
if isinstance(other, LexicalParaphrase):
return self.input < other.input
else:
return True
def __call__(self, state):
if self.apply_on_source:
new_ = state.source[:self.match_idx] + self.output + state.source[self.match_idx + len(self.input):]
return state.replace(source=new_, previous_state_action=(state, self))
else:
new_ = state.target[:self.match_idx] + self.output + state.target[self.match_idx + len(self.input):]
return state.replace(target=new_, previous_state_action=(state, self))
class LexicalParaphraseTemplate(ActionTemplate):
"""
Generates LexicalParaphrase actions for a given 'input'/'output' pair.
"""
def __init__(self, input_, output):
self.input = input_
self.output = output
def generate(self, state):
# check if input is in the source.
start = 0
while state.source.find(self.input, start) > -1:
idx = state.source.find(self.input, start)
yield LexicalParaphrase(self.input, self.output, apply_on_source=True, match_idx=idx)
start = idx+1
start = 0
while state.target.find(self.input, start) > -1:
idx = state.target.find(self.input, start)
yield LexicalParaphrase(self.input, self.output, apply_on_source=False, match_idx=idx)
start = idx+1
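# Illustrative use of the pieces above (``state`` is assumed to be a State
# with string ``source``/``target`` fields, as the actions require):
#
#   generate_actions = ActionGenerator(
#       [LexicalParaphraseTemplate("colour", "color")],
#       fixed_actions=[GiveUpAction()])
#   for action in generate_actions(state):
#       next_state = action(state)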
# - PhraseParaphrase
|
{
"content_hash": "a9d6df2eb5216907b97c76f34ee2c8c3",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 117,
"avg_line_length": 31.324324324324323,
"alnum_prop": 0.6160483175150993,
"repo_name": "arunchaganty/obviousli",
"id": "3091db1f617c41e75bb60f8c059943d7ff0b5739",
"size": "3524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "obviousli/actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "95585"
},
{
"name": "Shell",
"bytes": "3441"
}
],
"symlink_target": ""
}
|
from djinni.support import MultiSet # default imported in all files
from djinni.exception import CPyException # default imported in all files
from djinni.pycffi_marshal import CPyObject, CPyString
from PyCFFIlib_cffi import ffi, lib
from djinni import exception # this forces run of __init__.py which gives cpp option to call back into py to create exception
class ListStringHelper:
c_data_set = MultiSet()
@staticmethod
def check_c_data_set_empty():
assert len(ListStringHelper.c_data_set) == 0
@ffi.callback("struct DjinniString *(struct DjinniObjectHandle *, size_t)")
def __get_elem(cself, index):
try:
with CPyString.fromPy(CPyObject.toPy(None, cself)[index]) as py_obj:
_ret = py_obj.release_djinni_string()
assert _ret != ffi.NULL
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("size_t(struct DjinniObjectHandle *)")
def __get_size(cself):
return len(CPyObject.toPy(None, cself))
@ffi.callback("struct DjinniObjectHandle *()")
def __python_create():
c_ptr = ffi.new_handle(list())
ListStringHelper.c_data_set.add(c_ptr)
return ffi.cast("struct DjinniObjectHandle *", c_ptr)
@ffi.callback("void(struct DjinniObjectHandle *, struct DjinniString *)")
def __python_add(cself, el):
CPyObject.toPy(None, cself).append(CPyString.toPy(el))
@ffi.callback("void(struct DjinniObjectHandle * )")
def __delete(c_ptr):
assert c_ptr in ListStringHelper.c_data_set
ListStringHelper.c_data_set.remove(c_ptr)
@staticmethod
def _add_callbacks():
lib.list_string_add_callback__get_elem(ListStringHelper.__get_elem)
lib.list_string_add_callback___delete(ListStringHelper.__delete)
lib.list_string_add_callback__get_size(ListStringHelper.__get_size)
lib.list_string_add_callback__python_create(ListStringHelper.__python_create)
lib.list_string_add_callback__python_add(ListStringHelper.__python_add)
ListStringHelper._add_callbacks()
|
{
"content_hash": "3ba6cc1f87026945548ab4575b91f754",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 125,
"avg_line_length": 40.166666666666664,
"alnum_prop": 0.6758875057630245,
"repo_name": "trafi/djinni",
"id": "0b7ab7baaf14593f7a8e79857ca6cc0abc0e2e96",
"size": "2267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test-suite/generated-src/python/dh__list_string.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "80850"
},
{
"name": "C++",
"bytes": "735748"
},
{
"name": "CMake",
"bytes": "2811"
},
{
"name": "Dockerfile",
"bytes": "3250"
},
{
"name": "Java",
"bytes": "235096"
},
{
"name": "Lex",
"bytes": "4569"
},
{
"name": "Makefile",
"bytes": "9922"
},
{
"name": "Objective-C",
"bytes": "81700"
},
{
"name": "Objective-C++",
"bytes": "107146"
},
{
"name": "Python",
"bytes": "348883"
},
{
"name": "Scala",
"bytes": "323722"
},
{
"name": "Shell",
"bytes": "20173"
}
],
"symlink_target": ""
}
|
from django.views import generic
from .forms import TestForm
from .models import TestModel
class TestView(generic.UpdateView):
model = TestModel
form_class = TestForm
template_name = 'form.html'
def get_success_url(self):
return self.request.path
|
{
"content_hash": "e66b2771a434c0f4c18086b87c627553",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 35,
"avg_line_length": 21.153846153846153,
"alnum_prop": 0.72,
"repo_name": "Eraldo/django-autocomplete-light",
"id": "38d82b9100f2956d27393c6920e9362856365bfc",
"size": "275",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test_project/select2_generic_foreign_key/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44"
},
{
"name": "HTML",
"bytes": "4331"
},
{
"name": "JavaScript",
"bytes": "3248"
},
{
"name": "Python",
"bytes": "113829"
},
{
"name": "Shell",
"bytes": "2808"
}
],
"symlink_target": ""
}
|
"""
Use the AppVeyor API to download Windows artifacts.
Taken from: https://bitbucket.org/ned/coveragepy/src/tip/ci/download_appveyor.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""
from __future__ import unicode_literals
import argparse
import os
import zipfile
import requests
def make_auth_headers():
"""Make the authentication headers needed to use the Appveyor API."""
path = os.path.expanduser("~/.appveyor.token")
if not os.path.exists(path):
raise RuntimeError(
"Please create a file named `.appveyor.token` in your home directory. "
"You can get the token from https://ci.appveyor.com/api-token"
)
with open(path) as f:
token = f.read().strip()
headers = {
'Authorization': 'Bearer {}'.format(token),
}
return headers
def download_latest_artifacts(account_project, build_id):
"""Download all the artifacts from the latest build."""
if build_id is None:
url = "https://ci.appveyor.com/api/projects/{}".format(account_project)
else:
url = "https://ci.appveyor.com/api/projects/{}/build/{}".format(account_project, build_id)
build = requests.get(url, headers=make_auth_headers()).json()
jobs = build['build']['jobs']
print(u"Build {0[build][version]}, {1} jobs: {0[build][message]}".format(build, len(jobs)))
for job in jobs:
name = job['name']
print(u" {0}: {1[status]}, {1[artifactsCount]} artifacts".format(name, job))
url = "https://ci.appveyor.com/api/buildjobs/{}/artifacts".format(job['jobId'])
response = requests.get(url, headers=make_auth_headers())
artifacts = response.json()
for artifact in artifacts:
is_zip = artifact['type'] == "Zip"
filename = artifact['fileName']
print(u" {0}, {1} bytes".format(filename, artifact['size']))
url = "https://ci.appveyor.com/api/buildjobs/{}/artifacts/{}".format(job['jobId'], filename)
download_url(url, filename, make_auth_headers())
if is_zip:
unpack_zipfile(filename)
os.remove(filename)
def ensure_dirs(filename):
"""Make sure the directories exist for `filename`."""
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
def download_url(url, filename, headers):
"""Download a file from `url` to `filename`."""
ensure_dirs(filename)
response = requests.get(url, headers=headers, stream=True)
if response.status_code == 200:
with open(filename, 'wb') as f:
for chunk in response.iter_content(16 * 1024):
f.write(chunk)
else:
print(u" Error downloading {}: {}".format(url, response))
def unpack_zipfile(filename):
"""Unpack a zipfile, using the names in the zip."""
with open(filename, 'rb') as fzip:
z = zipfile.ZipFile(fzip)
for name in z.namelist():
print(u" extracting {}".format(name))
ensure_dirs(name)
z.extract(name)
parser = argparse.ArgumentParser(description='Download artifacts from AppVeyor.')
parser.add_argument('--id',
metavar='PROJECT_ID',
default='rfleschenberg/django-shop-rest-checkout',
help='Project ID in AppVeyor.')
parser.add_argument('build',
nargs='?',
metavar='BUILD_ID',
help='Build ID in AppVeyor. Eg: master-123')
if __name__ == "__main__":
# import logging
# logging.basicConfig(level="DEBUG")
args = parser.parse_args()
download_latest_artifacts(args.id, args.build)
|
{
"content_hash": "1e5c9b7145deb986a4a7238a917b84b9",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 104,
"avg_line_length": 35.31481481481482,
"alnum_prop": 0.6137912952281069,
"repo_name": "rfleschenberg/django-shop-rest-checkout",
"id": "e8098c888ce5f958686d0f4fbd782fb048704929",
"size": "3836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ci/appveyor-download.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1598"
},
{
"name": "Python",
"bytes": "15876"
}
],
"symlink_target": ""
}
|
from .about import *
|
{
"content_hash": "73e8389d352e499a3772441c1153bcea",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 20,
"avg_line_length": 21,
"alnum_prop": 0.7142857142857143,
"repo_name": "ryfeus/lambda-packs",
"id": "a55014485a1e94a14df8dfaf1bce1c2921d047c3",
"size": "21",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Spacy/source2.7/thinc/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
import json
from twisted.trial import unittest
from hashlib import sha256
from nacl.public import PrivateKey, PublicKey, Box
from .common import TwoNodeMixin
from ..mailbox import channel
from ..mailbox.server import parseMsgA, parseMsgB
class msgC(TwoNodeMixin, unittest.TestCase):
def test_create_and_parse(self):
nA, nB, entA, entB = self.make_connected_nodes()
cidAB = entA["id"]
c = channel.OutboundChannel(nA.db, cidAB)
payload = {"hi": "there"}
msgC = c.createMsgC(payload)
self.failUnless(msgC.startswith("c0:"))
CIDToken, CIDBox, msgD = channel.parse_msgC(msgC)
CIDKey = entB["my_CID_key"].decode("hex")
seqnum, HmsgD, channel_pubkey = channel.decrypt_CIDBox(CIDKey, CIDBox)
self.failUnlessEqual(HmsgD, sha256(msgD).digest())
Bkey = PrivateKey(entB["my_new_channel_privkey"].decode("hex"))
keylist = [(Bkey, "keyid")]
keyid, pubkey2_s, msgE = channel.decrypt_msgD(msgD, keylist)
their_verfkey = entB["their_verfkey"].decode("hex")
seqnum, payload2_s = channel.check_msgE(msgE, pubkey2_s,
their_verfkey,
entB["highest_inbound_seqnum"])
self.failUnlessEqual(payload, json.loads(payload2_s))
def get_inbound_seqnum(self, db, cid):
c = db.execute("SELECT highest_inbound_seqnum FROM addressbook"
" WHERE id=?", (cid,))
return c.fetchone()[0]
def get_outbound_seqnum(self, db, cid):
c = db.execute("SELECT next_outbound_seqnum FROM addressbook"
" WHERE id=?", (cid,))
return c.fetchone()[0]
def test_channel_dispatch(self):
nA, nB, entA, entB = self.make_connected_nodes()
entA2, entB2 = self.add_new_channel(nA, nB)
entA3, entB3 = self.add_new_channel(nA, nB)
self.failUnlessEqual(self.get_outbound_seqnum(nA.db, entA2["id"]), 1)
self.failUnlessEqual(self.get_inbound_seqnum(nB.db, entB2["id"]), 0)
chan = channel.OutboundChannel(nA.db, entA2["id"])
payload = {"hi": "there"}
msgC = chan.createMsgC(payload)
self.failUnless(msgC.startswith("c0:"))
self.failUnlessEqual(self.get_outbound_seqnum(nA.db, entA2["id"]), 2)
self.failUnlessEqual(self.get_inbound_seqnum(nB.db, entB2["id"]), 0)
CIDToken, CIDBox, msgD = channel.parse_msgC(msgC)
# TODO: test CIDToken
# test CIDBox
        cid, which_key = channel.find_channel_from_CIDBox(nB.db, CIDBox)
self.failUnlessEqual(cid, entB2["id"])
# the CIDBox claims to tell us which key to use. We won't actually
# use it unless it matches the cid that was able to open the CIDBox
privkey_s = entB2["my_new_channel_privkey"].decode("hex")
pubkey = PrivateKey(privkey_s).public_key.encode()
self.failUnlessEqual(which_key, pubkey)
self.failUnlessEqual(self.get_outbound_seqnum(nA.db, entA2["id"]), 2)
self.failUnlessEqual(self.get_inbound_seqnum(nB.db, entB2["id"]), 0)
# but other agents should not recognize this CIDBox
        cid, which_key = channel.find_channel_from_CIDBox(nA.db, CIDBox)
self.failUnlessEqual(cid, None)
self.failUnlessEqual(which_key, None)
self.failUnlessEqual(self.get_outbound_seqnum(nA.db, entA2["id"]), 2)
self.failUnlessEqual(self.get_inbound_seqnum(nB.db, entB2["id"]), 0)
# this exercises the full processing path, which will increment both
# outbound and inbound seqnums
cid2, seqnum, payload2_s = channel.process_msgC(nB.db, msgC)
self.failUnlessEqual(cid2, entB2["id"])
self.failUnlessEqual(seqnum, 1)
self.failUnlessEqual(json.loads(payload2_s), payload)
self.failUnlessEqual(self.get_outbound_seqnum(nA.db, entA2["id"]), 2)
self.failUnlessEqual(self.get_inbound_seqnum(nB.db, entB2["id"]), 1)
class Send(TwoNodeMixin, unittest.TestCase):
def test_send(self):
nA, nB, entA, entB = self.make_connected_nodes()
d = nA.agent.send_message(entA["id"], {"hi": "world"})
def _sent(res):
msgA = res[0][1]
self.failUnless(msgA.startswith("a0:"))
pubkey1_s, boxed = parseMsgA(msgA)
tpriv_hex = self.tports2["local"]["retrieval"]["privkey"]
tpriv = PrivateKey(tpriv_hex.decode("hex"))
b = Box(tpriv, PublicKey(pubkey1_s))
msgB = b.decrypt(boxed)
MSTT, msgC = parseMsgB(msgB)
d.addCallback(_sent)
return d
|
{
"content_hash": "d3d55088be7e91033dc69dfed7698ac7",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 79,
"avg_line_length": 42.1,
"alnum_prop": 0.6190887497300799,
"repo_name": "warner/petmail",
"id": "030522a30e0995e579a53f6b9b5a7bf2fd709216",
"size": "4631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/petmail/test/test_channel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1894"
},
{
"name": "Emacs Lisp",
"bytes": "5279"
},
{
"name": "HTML",
"bytes": "9968"
},
{
"name": "JavaScript",
"bytes": "19036"
},
{
"name": "Makefile",
"bytes": "3247"
},
{
"name": "Python",
"bytes": "437895"
}
],
"symlink_target": ""
}
|
"""Middleware classes for johnny cache."""
from django.middleware import transaction as trans_middleware
from django.db import transaction
from johnny import cache, settings
class QueryCacheMiddleware(object):
"""
This middleware class monkey-patches django's ORM to maintain
generational info on each table (model) and to automatically cache all
querysets created via the ORM. This should be the first middleware
in your middleware stack.
"""
__state = {} # Alex Martelli's borg pattern
def __init__(self):
self.__dict__ = self.__state
self.disabled = settings.DISABLE_QUERYSET_CACHE
self.installed = getattr(self, 'installed', False)
if not self.installed and not self.disabled:
            # When we install, let's refresh the blacklist, just in case
            # johnny was loaded before the setting existed somehow...
cache.blacklist = settings.BLACKLIST
self.query_cache_backend = cache.get_backend()
self.query_cache_backend.patch()
self.installed = True
def unpatch(self):
self.query_cache_backend.unpatch()
self.query_cache_backend.flush_query_cache()
self.installed = False
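# Minimal standalone sketch of the borg pattern used by QueryCacheMiddleware
# above: every instance shares a single __dict__, so state set through one
# instance (e.g. ``installed``) is visible from all later instances.
class _BorgSketch(object):
    _shared_state = {}
    def __init__(self):
        self.__dict__ = self._shared_state
# _BorgSketch().installed = True  =>  _BorgSketch().installed is True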
class LocalStoreClearMiddleware(object):
"""
This middleware clears the localstore cache in `johnny.cache.local`
at the end of every request.
"""
def process_exception(self, *args, **kwargs):
cache.local.clear()
raise
def process_response(self, req, resp):
cache.local.clear()
return resp
class CommittingTransactionMiddleware(trans_middleware.TransactionMiddleware):
"""
A version of the built in TransactionMiddleware that always commits its
transactions, even if they aren't dirty.
"""
def process_response(self, request, response):
if transaction.is_managed():
try:
transaction.commit()
except:
pass
transaction.leave_transaction_management()
return response
|
{
"content_hash": "a2ea8771098837b865e92cc00dcd8c7e",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 33.73770491803279,
"alnum_prop": 0.6574344023323615,
"repo_name": "havard024/prego",
"id": "3df96611aaa45bd919ce934c002c3ff2d3fdbb61",
"size": "2105",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/johnny/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2978"
},
{
"name": "CSS",
"bytes": "620190"
},
{
"name": "JavaScript",
"bytes": "2456120"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "34948766"
},
{
"name": "Shell",
"bytes": "12359"
},
{
"name": "TeX",
"bytes": "113674"
}
],
"symlink_target": ""
}
|
"""Boto3 session example."""
import boto3.session
from dependency_injector import containers, providers
class Service:
def __init__(self, s3_client, sqs_client):
self.s3_client = s3_client
self.sqs_client = sqs_client
class Container(containers.DeclarativeContainer):
config = providers.Configuration()
session = providers.Resource(
boto3.session.Session,
aws_access_key_id=config.aws_access_key_id,
aws_secret_access_key=config.aws_secret_access_key,
aws_session_token=config.aws_session_token,
)
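    # ``session.provided.client.call()`` builds a provider expression that
    # resolves to ``session().client(...)`` at runtime; the two client
    # resources below are equivalent spellings of that call.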
s3_client = providers.Resource(
session.provided.client.call(),
service_name="s3",
)
sqs_client = providers.Resource(
providers.MethodCaller(session.provided.client), # Alternative syntax
service_name="sqs",
)
service1 = providers.Factory(
Service,
s3_client=s3_client,
sqs_client=sqs_client,
)
service2 = providers.Factory(
Service,
s3_client=session.provided.client.call(service_name="s3"), # Alternative inline syntax
sqs_client=session.provided.client.call(service_name="sqs"), # Alternative inline syntax
)
def main():
container = Container()
container.config.aws_access_key_id.from_env("AWS_ACCESS_KEY_ID")
container.config.aws_secret_access_key.from_env("AWS_SECRET_ACCESS_KEY")
container.config.aws_session_token.from_env("AWS_SESSION_TOKEN")
container.init_resources()
s3_client = container.s3_client()
print(s3_client)
sqs_client = container.sqs_client()
print(sqs_client)
service1 = container.service1()
print(service1, service1.s3_client, service1.sqs_client)
assert service1.s3_client is s3_client
assert service1.sqs_client is sqs_client
service2 = container.service2()
print(service2, service2.s3_client, service2.sqs_client)
assert service2.s3_client.__class__.__name__ == "S3"
assert service2.sqs_client.__class__.__name__ == "SQS"
if __name__ == "__main__":
main()
|
{
"content_hash": "43f514ad6a285e036e513b75cabf7957",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 97,
"avg_line_length": 28.51388888888889,
"alnum_prop": 0.665854846566001,
"repo_name": "ets-labs/dependency_injector",
"id": "33ed85f6dd4ffb0cf0fd7d12844a69a572b02a71",
"size": "2053",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/miniapps/boto3-session/boto3_session_example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "171148"
}
],
"symlink_target": ""
}
|
import os
from distutils.core import setup
from pyngdom import __version__
try:
f = open(os.path.join(os.path.dirname(__file__), 'README.rst'))
long_description = f.read()
f.close()
except IOError:
long_description = ''
setup(
name='pyngdom',
version=__version__,
packages=['pyngdom'],
author='Alvaro Leiva',
author_email='aleivag@gmail.com',
url='https://github.com/Epi10/pyngdom',
download_url='https://github.com/Epi10/pyngdom/releases/tag/%s' % __version__,
classifiers=[
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"Topic :: System :: Monitoring"
],
keywords=['monitoring', 'rum', 'pingdom'],
description='A simple pingdom API interface for read RUM information',
long_description=long_description,
license='MIT'
)
|
{
"content_hash": "1ceab4594e64af2c6f6d5725f21043f3",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 82,
"avg_line_length": 30.314285714285713,
"alnum_prop": 0.6248821866163996,
"repo_name": "Epi10/pyngdom",
"id": "3f34507bf59f858513080a161a5c8e56315d67dc",
"size": "1062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11664"
}
],
"symlink_target": ""
}
|
from flask import g
from oauthlib.oauth2.rfc6749.clients.web_application import WebApplicationClient
from werkzeug.local import LocalProxy
from flask_dance.consumer import OAuth2ConsumerBlueprint
__maintainer__ = "Ryan Schaffer <schaffer.ry@gmail.com>"
AUTH_HEADER = "auth_header"
URI_QUERY = "query"
BODY = "body"
ZOHO_TOKEN_HEADER = "Zoho-oauthtoken"
def make_zoho_blueprint(
client_id=None,
client_secret=None,
*,
scope=None,
redirect_url=None,
offline=False,
redirect_to=None,
login_url=None,
session_class=None,
storage=None,
reprompt_consent=False,
rule_kwargs=None,
):
"""
Make a blueprint for authenticating with Zoho using OAuth 2. This requires
a client ID and client secret from Zoho. You should either pass them to
this constructor, or make sure that your Flask application config defines
them, using the variables :envvar:`ZOHO_OAUTH_CLIENT_ID` and
:envvar:`ZOHO_OAUTH_CLIENT_SECRET`.
IMPORTANT: Configuring the base_url is not supported in this config.
Args:
client_id (str): The client ID for your application on Zoho.
client_secret (str): The client secret for your application on Zoho
scope (list, optional): list of scopes (str) for the OAuth token
redirect_url (str): the URL to redirect to after the authentication
dance is complete
redirect_to (str): if ``redirect_url`` is not defined, the name of the
view to redirect to after the authentication dance is complete.
The actual URL will be determined by :func:`flask.url_for`
login_url (str, optional): the URL path for the ``login`` view.
Defaults to ``/zoho``
authorized_url (str, optional): the URL path for the ``authorized`` view.
Defaults to ``/zoho/authorized``.
session_class (class, optional): The class to use for creating a
Requests session. Defaults to
:class:`~flask_dance.consumer.requests.OAuth2Session`.
storage: A token storage class, or an instance of a token storage
class, to use for this blueprint. Defaults to
:class:`~flask_dance.consumer.storage.session.SessionStorage`.
offline (bool): Whether to request `offline access`
for the OAuth token. Defaults to False
reprompt_consent (bool): If True, force Zoho to re-prompt the user
for their consent, even if the user has already given their
consent. Defaults to False.
rule_kwargs (dict, optional): Additional arguments that should be passed when adding
the login and authorized routes. Defaults to ``None``.
:rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
:returns: A :doc:`blueprint <flask:blueprints>` to attach to your Flask app.
"""
scope = scope or ["ZohoCRM.users.all"]
base_url = "https://www.zohoapis.com/"
client = ZohoWebClient(client_id, token_type=ZOHO_TOKEN_HEADER)
authorization_url_params = {}
authorization_url_params["access_type"] = "offline" if offline else "online"
if reprompt_consent:
authorization_url_params["prompt"] = "consent"
zoho_bp = OAuth2ConsumerBlueprint(
"zoho",
__name__,
client_id=client_id,
client_secret=client_secret,
client=client,
scope=scope,
base_url=base_url,
token_url="https://accounts.zoho.com/oauth/v2/token",
authorization_url="https://accounts.zoho.com/oauth/v2/auth",
authorization_url_params=authorization_url_params,
redirect_url=redirect_url,
redirect_to=redirect_to,
login_url=login_url,
session_class=session_class,
storage=storage,
rule_kwargs=rule_kwargs,
)
zoho_bp.from_config["client_id"] = "ZOHO_OAUTH_CLIENT_ID"
zoho_bp.from_config["client_secret"] = "ZOHO_OAUTH_CLIENT_SECRET"
@zoho_bp.before_app_request
def set_applocal_session():
g.flask_dance_zoho = zoho_bp.session
return zoho_bp
zoho = LocalProxy(lambda: g.flask_dance_zoho)
class ZohoWebClient(WebApplicationClient):
"""
Remove the requirement that token_types adhere to OAuth Standard
"""
@property
def token_types(self):
return {
"Bearer": self._add_bearer_token,
"MAC": self._add_mac_token,
ZOHO_TOKEN_HEADER: self._add_zoho_token,
}
def _add_zoho_token(
self, uri, http_method="GET", body=None, headers=None, token_placement=None
):
"""Add a zoho token to the request uri, body or authorization header. follows bearer pattern"""
headers = self.prepare_zoho_headers(self.access_token, headers)
return uri, headers, body
@staticmethod
def prepare_zoho_headers(token, headers=None):
"""Add a `Zoho Token`_ to the request URI.
Recommended method of passing bearer tokens.
Authorization: Zoho-oauthtoken h480djs93hd8
.. _`Zoho-oauthtoken Token`: custom zoho token
"""
headers = headers or {}
headers["Authorization"] = "{token_header} {token}".format(
token_header=ZOHO_TOKEN_HEADER, token=token
)
return headers
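# Illustrative wiring (a sketch, not part of this module); the secret key and
# client credentials below are placeholders read from the app config as
# documented above:
#
#     from flask import Flask
#     from flask_dance.contrib.zoho import make_zoho_blueprint, zoho
#
#     app = Flask(__name__)
#     app.secret_key = "replace-me"
#     app.config["ZOHO_OAUTH_CLIENT_ID"] = "..."
#     app.config["ZOHO_OAUTH_CLIENT_SECRET"] = "..."
#     app.register_blueprint(make_zoho_blueprint(), url_prefix="/login")
#
#     # inside an authorized view, for instance:
#     #   resp = zoho.get("crm/v2/users")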
|
{
"content_hash": "cf422ead408e699218c8b0a19ccb4192",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 103,
"avg_line_length": 37.856115107913666,
"alnum_prop": 0.6554541999239832,
"repo_name": "singingwolfboy/flask-dance",
"id": "12f25716c07716925d307dffe3095f9b4216ea68",
"size": "5262",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "flask_dance/contrib/zoho.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "329946"
}
],
"symlink_target": ""
}
|
from django.db import models
# Create your models here.
class Author(models.Model):
name = models.CharField(max_length=127, unique=True)
def __str__(self):
return self.name
class Gender(models.Model):
name = models.CharField(max_length=3, unique=True)
description = models.CharField(max_length=15)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
def __str__(self):
return self.description
class Token(models.Model):
name = models.CharField(max_length=31, unique=True)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
def __str__(self):
return self.name
class Desig(models.Model):
token = models.ForeignKey(Token, null=True, on_delete=models.SET_NULL)
gender = models.ForeignKey(Gender, null=True, on_delete=models.SET_NULL)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
uid = models.IntegerField(default=1)
def __str__(self):
return str(self.token) + " (" + str(self.gender) + ")"
def __lt__(self, other):
return (self.id < other.id)
def evaluable(self):
return "Desig.objects.filter(token__name='{}').filter(gender__name='{}')[0]".format(self.token.name, self.gender.name)
def mini(self):
return "findDesig('{}', '{}')".format(self.token.name, self.gender.name)
class Lang(models.Model):
name = models.CharField(max_length=47, unique=True)
short = models.CharField(max_length=2, unique=True)
long = models.CharField(max_length=3, unique=True)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
def __str__(self):
return self.long
class ScopeType(models.Model):
name = models.CharField(max_length=31, unique=True)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
def __str__(self):
return self.name
class Scope(models.Model):
desig = models.ForeignKey(Desig, null=True, on_delete=models.SET_NULL)
scopeType = models.ForeignKey(ScopeType, null=True, on_delete=models.SET_NULL)
lang = models.ForeignKey(Lang, null=True, on_delete=models.SET_NULL)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
def __str__(self):
return str(self.desig) + ": " + str(self.scopeType) + "=" + str(self.lang)
class Utter(models.Model):
name = models.CharField(max_length=47, unique=True)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
def __str__(self):
return self.name
class Pronounce(models.Model):
desig = models.ForeignKey(Desig, null=True, on_delete=models.SET_NULL)
utter = models.ForeignKey(Utter, null=True, on_delete=models.SET_NULL)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
def __str__(self):
return str(self.desig) + str(self.utter)
class Survey(models.Model):
name = models.CharField(max_length=47)
year = models.IntegerField(default=0)
gender = models.ForeignKey(Gender, null=True, on_delete=models.SET_NULL)
basis = models.IntegerField(default=0)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
def __str__(self):
return self.name
class Freq(models.Model):
desig = models.ForeignKey(Desig, null=True, on_delete=models.SET_NULL)
survey = models.ForeignKey(Survey, null=True, on_delete=models.SET_NULL)
count = models.IntegerField(default=0)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
def __str__(self):
return str(self.desig) + " " + str(self.survey) + " " + str(self.count)
class Idea(models.Model):
name = models.CharField(max_length=47, unique=True)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
def __str__(self):
return self.name
class Meaning(models.Model):
desig = models.ForeignKey(Desig, null=True, on_delete=models.SET_NULL)
idea = models.ForeignKey(Idea, null=True, on_delete=models.SET_NULL)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
def __str__(self):
return str(self.desig) + " " + str(self.idea)
class Onto(models.Model):
child = models.ForeignKey(Idea, null=True, on_delete=models.SET_NULL, related_name='onto_child_set')
parent = models.ForeignKey(Idea, null=True, on_delete=models.SET_NULL, related_name='onto_parent_set')
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
def __str__(self):
return str(self.child) + "<<--" + str(self.parent)
class RelType(models.Model):
name = models.CharField(max_length=31, unique=True)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
def __str__(self):
return self.name
class Rel(models.Model):
relType = models.ForeignKey(RelType, null=True, on_delete=models.SET_NULL)
core = models.ForeignKey(Desig, null=True, on_delete=models.SET_NULL, related_name='related_core_set')
satellite = models.ForeignKey(Desig, null=True, on_delete=models.SET_NULL, related_name='related_satellite_set')
def __str__(self):
return "{}({}, {})".format(self.relType, self.core, self.satellite)
|
{
"content_hash": "744c8fbf0b05e1eddc08eaf291f61226",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 126,
"avg_line_length": 43.53333333333333,
"alnum_prop": 0.6822358346094947,
"repo_name": "philpot/tocayo",
"id": "8a321b338c0dd5106dad3ad8edebb7bfd1e1bc21",
"size": "5224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tocayoproj/tocayoapp/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "187326"
},
{
"name": "CLIPS",
"bytes": "159266"
},
{
"name": "Common Lisp",
"bytes": "970346"
},
{
"name": "Groff",
"bytes": "23493"
},
{
"name": "HTML",
"bytes": "10146"
},
{
"name": "Makefile",
"bytes": "2911"
},
{
"name": "Python",
"bytes": "408154"
},
{
"name": "Shell",
"bytes": "2020"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
customer_data = {
'username': 'panosl',
'first_name': 'Panos',
'last_name': 'Laganakos',
'email': 'panos.laganakos@gmail.com',
'address': 'omirou 17',
'city': 'Kalamata',
'country': '1',
'password1': '12345',
'password2': '12345',
}
class CustomerCreationTest(TestCase):
fixtures = ['countries.yaml']
def test_view(self):
response = self.client.get('/customer/register/')
self.failUnlessEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'customer.html')
def test_creation(self):
response = self.client.post('/customer/register/', customer_data)
self.assertRedirects(response, '/', status_code=302, target_status_code=301)
|
{
"content_hash": "fe053b94dccd027a9fd2ccc711c84e68",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 78,
"avg_line_length": 25.925925925925927,
"alnum_prop": 0.7014285714285714,
"repo_name": "panosl/helios",
"id": "7f1f79f25ec35b122bee1d2c4536690fb0757dc8",
"size": "780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helios/customers/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "87902"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name='sorl-thumbnail',
use_scm_version=True,
description='Thumbnails for Django',
long_description=open('README.rst').read(),
author="Mikko Hellsing",
author_email='mikko@aino.se',
maintainer="Jazzband",
maintainer_email="roadies@jazzband.co",
license="BSD",
url='https://github.com/jazzband/sorl-thumbnail',
packages=find_packages(exclude=['tests', 'tests.*']),
platforms='any',
python_requires='>=3.7',
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Multimedia :: Graphics',
'Framework :: Django',
'Framework :: Django :: 3.2',
'Framework :: Django :: 4.0',
'Framework :: Django :: 4.1',
],
setup_requires=['setuptools_scm'],
)
|
{
"content_hash": "afc425ee6f4cd18e671e469e7f00cec8",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 59,
"avg_line_length": 35.62162162162162,
"alnum_prop": 0.5986342943854325,
"repo_name": "jazzband/sorl-thumbnail",
"id": "c5d60b234308cfd31e16b7e8c677c3a02752cc0c",
"size": "1318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "5232"
},
{
"name": "Python",
"bytes": "151859"
},
{
"name": "Shell",
"bytes": "638"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
"""
Example usage for the LearnTransducer class. The algorithm is used to infer
a sample python implementation of the ReplaceComments() function used by
Mod-Security to deobfuscate inputs before they are passed through SQL injection
filters.
For details on the inference algorithm see the paper
* Back in Black: Towards Formal, Black-Box Analysis of Sanitizers and Filters
George Argyros, Ioannis Stais, Angelos D. Keromytis and Aggelos Kiayias
"""
import argparse
import random
# Importing from ./context.py is performed to avoid assumptions on the location
# of the library on the system. If library is installed then `import sflearn`
# can be used.
from context import BekProgram, TransducerLearner
def replace_comments(inp):
"""""
Sample implementation of the ReplaceComments function from Mod-Security.
The function will remove all strings matching the /* */ style comments
and replace them with a space.
See function msre_fn_replaceComments_execute in the Mod-Security source
code.
"""
state = 0
out = ''
i = 0
while i < len(inp):
if state == 0 and inp[i:i+2] == "/*":
i += 2
state = 1
out += ' '
continue
if state == 1 and inp[i:i+2] == "*/":
i += 2
state = 0
continue
if state == 0:
out += inp[i]
i += 1
elif state == 1:
i += 1
return out
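# Hand-checked behaviour of the state machine above:
#   replace_comments('a/*b*/c')  ->  'a c'
#   replace_comments('a/*b')     ->  'a '   (an open comment swallows the rest)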
class ReplaceCommentsLearner(TransducerLearner):
"""
The class implements membership and equivalence queries for the
ReplaceComments() function.
"""
def __init__(self, I):
super(ReplaceCommentsLearner, self).__init__(I)
def membership_query(self, inp):
"""
Convert the input back to a string and run it through the encoder.
"""
inp_enc = [chr(c) for c in inp]
out = replace_comments(''.join(inp_enc))
return [ord(c) for c in out]
def equivalence_query(self, M):
"""
Run the sanitizer on a bunch of random inputs and declare it correct
if no counterexample is found.
"""
tests = ['////*aaaaaa*/', '/*aaaaa*/', '*/aaaaa', 'aaaaaa/*aaaaaa']
# Mix tests together with random strings
max_len = 10
tests_num = 100
for _ in xrange(tests_num):
inp = []
for _ in xrange(max_len):
inp += [random.choice(self.I)]
if random.randint(0, 10) == 5:
vector = random.choice(tests)
inp += [ord(c) for c in vector]
if M.consume_input(inp) != self.membership_query(inp):
return False, inp
return True, None
def _create_argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--out", default="replacecomments", dest="outfile",
help="Filename to save the transducer")
parser.add_argument("--bek", default=False, action="store_true", dest="save_bek",
help="Save transducer in BEK program format")
return parser
def main():
parser = _create_argument_parser()
args = parser.parse_args()
    I = [ord(c) for c in set('/**/abc')]
replace_comments_learner = ReplaceCommentsLearner(I)
print '[+] Learning ReplaceComments() function: ',
sanitizer = replace_comments_learner.learn_transducer()
print 'OK'
print '[+] Saving transducer model in file {}.txt: '.format(args.outfile),
sanitizer.save(args.outfile + '.txt')
print 'OK'
if args.save_bek:
print '[+] Saving BEK program in file {}.bek: '.format(args.outfile),
bek = BekProgram()
bek.create_from_transducer(sanitizer)
bek.save(args.outfile + '.bek')
print 'OK'
if __name__ == '__main__':
main()
|
{
"content_hash": "29255f4fcccb38ad5412ab6ce4eb366b",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 85,
"avg_line_length": 30.873015873015873,
"alnum_prop": 0.5956298200514138,
"repo_name": "GeorgeArgyros/sflearn",
"id": "b96456260e73c2d0d8e204f6747777147ea2c4cb",
"size": "3890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/replacecomments.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53203"
}
],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "BoxCox", sigma = 0.0, exog_count = 100, ar_order = 12);
|
{
"content_hash": "aa18f884040987697c376c623b2117ff",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 162,
"avg_line_length": 37.42857142857143,
"alnum_prop": 0.7022900763358778,
"repo_name": "antoinecarme/pyaf",
"id": "84080296487f9ee5ff2c39e05b117ef02de54cc1",
"size": "262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_BoxCox/trend_PolyTrend/cycle_7/ar_12/test_artificial_32_BoxCox_PolyTrend_7_12_100.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
"""Abseil Python logging module implemented on top of standard logging.
Simple usage::
from absl import logging
logging.info('Interesting Stuff')
logging.info('Interesting Stuff with Arguments: %d', 42)
logging.set_verbosity(logging.INFO)
logging.log(logging.DEBUG, 'This will *not* be printed')
logging.set_verbosity(logging.DEBUG)
logging.log(logging.DEBUG, 'This will be printed')
logging.warning('Worrying Stuff')
logging.error('Alarming Stuff')
logging.fatal('AAAAHHHHH!!!!') # Process exits.
Usage note: Do not pre-format the strings in your program code.
Instead, let the logging module perform argument interpolation.
This saves cycles because strings that don't need to be printed
are never formatted. Note that this module does not attempt to
interpolate arguments when no arguments are given. In other words::
logging.info('Interesting Stuff: %s')
does not raise an exception because logging.info() has only one
argument, the message string.
"Lazy" evaluation for debugging
-------------------------------
If you do something like this::
logging.debug('Thing: %s', thing.ExpensiveOp())
then the ExpensiveOp will be evaluated even if nothing
is printed to the log. To avoid this, use the level_debug() function::
if logging.level_debug():
logging.debug('Thing: %s', thing.ExpensiveOp())
Per file level logging is supported by logging.vlog() and
logging.vlog_is_on(). For example::
if logging.vlog_is_on(2):
logging.vlog(2, very_expensive_debug_message())
Notes on Unicode
----------------
The log output is encoded as UTF-8. Don't pass data in other encodings in
bytes() instances -- instead pass unicode string instances when you need to
(for both the format string and arguments).
Note on critical and fatal:
Standard logging module defines fatal as an alias to critical, but it's not
documented, and it does NOT actually terminate the program.
This module only defines fatal but not critical, and it DOES terminate the
program.
The differences in behavior are historical and unfortunate.
"""
import collections
from collections import abc
import getpass
import io
import itertools
import logging
import os
import socket
import struct
import sys
import threading
import time
import timeit
import traceback
import types
import warnings
from absl import flags
from absl.logging import converter
try:
from typing import NoReturn
except ImportError:
pass
FLAGS = flags.FLAGS
# Logging levels.
FATAL = converter.ABSL_FATAL
ERROR = converter.ABSL_ERROR
WARNING = converter.ABSL_WARNING
WARN = converter.ABSL_WARNING # Deprecated name.
INFO = converter.ABSL_INFO
DEBUG = converter.ABSL_DEBUG
# Regex to match/parse log line prefixes.
ABSL_LOGGING_PREFIX_REGEX = (
r'^(?P<severity>[IWEF])'
r'(?P<month>\d\d)(?P<day>\d\d) '
r'(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)'
r'\.(?P<microsecond>\d\d\d\d\d\d) +'
r'(?P<thread_id>-?\d+) '
r'(?P<filename>[a-zA-Z<][\w._<>-]+):(?P<line>\d+)')
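# Example of a prefix this regex matches (field values are illustrative):
#   'I0412 09:30:05.123456  4567 my_module.py:88'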
# Mask to convert integer thread ids to unsigned quantities for logging purposes
_THREAD_ID_MASK = 2 ** (struct.calcsize('L') * 8) - 1
# Extra property set on the LogRecord created by ABSLLogger when its level is
# CRITICAL/FATAL.
_ABSL_LOG_FATAL = '_absl_log_fatal'
# Extra prefix added to the log message when a non-absl logger logs a
# CRITICAL/FATAL message.
_CRITICAL_PREFIX = 'CRITICAL - '
# Used by findCaller to skip callers from */logging/__init__.py.
_LOGGING_FILE_PREFIX = os.path.join('logging', '__init__.')
# The ABSL logger instance, initialized in _initialize().
_absl_logger = None
# The ABSL handler instance, initialized in _initialize().
_absl_handler = None
_CPP_NAME_TO_LEVELS = {
'debug': '0', # Abseil C++ has no DEBUG level, mapping it to INFO here.
'info': '0',
'warning': '1',
'warn': '1',
'error': '2',
'fatal': '3'
}
_CPP_LEVEL_TO_NAMES = {
'0': 'info',
'1': 'warning',
'2': 'error',
'3': 'fatal',
}
class _VerbosityFlag(flags.Flag):
"""Flag class for -v/--verbosity."""
def __init__(self, *args, **kwargs):
super(_VerbosityFlag, self).__init__(
flags.IntegerParser(),
flags.ArgumentSerializer(),
*args, **kwargs)
@property
def value(self):
return self._value
@value.setter
def value(self, v):
self._value = v
self._update_logging_levels()
def _update_logging_levels(self):
"""Updates absl logging levels to the current verbosity.
Visibility: module-private
"""
if not _absl_logger:
return
if self._value <= converter.ABSL_DEBUG:
standard_verbosity = converter.absl_to_standard(self._value)
else:
# --verbosity is set to higher than 1 for vlog.
standard_verbosity = logging.DEBUG - (self._value - 1)
# Also update root level when absl_handler is used.
if _absl_handler in logging.root.handlers:
# Make absl logger inherit from the root logger. absl logger might have
# a non-NOTSET value if logging.set_verbosity() is called at import time.
_absl_logger.setLevel(logging.NOTSET)
logging.root.setLevel(standard_verbosity)
else:
_absl_logger.setLevel(standard_verbosity)
class _LoggerLevelsFlag(flags.Flag):
"""Flag class for --logger_levels."""
def __init__(self, *args, **kwargs):
super(_LoggerLevelsFlag, self).__init__(
_LoggerLevelsParser(),
_LoggerLevelsSerializer(),
*args, **kwargs)
@property
def value(self):
# For lack of an immutable type, be defensive and return a copy.
    # Modifications to the dict aren't supported and won't have any effect.
# While Py3 could use MappingProxyType, that isn't deepcopy friendly, so
# just return a copy.
return self._value.copy()
@value.setter
def value(self, v):
self._value = {} if v is None else v
self._update_logger_levels()
def _update_logger_levels(self):
# Visibility: module-private.
# This is called by absl.app.run() during initialization.
for name, level in self._value.items():
logging.getLogger(name).setLevel(level)
class _LoggerLevelsParser(flags.ArgumentParser):
"""Parser for --logger_levels flag."""
def parse(self, value):
if isinstance(value, abc.Mapping):
return value
pairs = [pair.strip() for pair in value.split(',') if pair.strip()]
# Preserve the order so that serialization is deterministic.
levels = collections.OrderedDict()
for name_level in pairs:
name, level = name_level.split(':', 1)
name = name.strip()
level = level.strip()
levels[name] = level
return levels
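  # Worked example (illustrative): parse('myapp.foo:INFO, other.logger:DEBUG')
  # returns OrderedDict([('myapp.foo', 'INFO'), ('other.logger', 'DEBUG')]).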
class _LoggerLevelsSerializer(object):
"""Serializer for --logger_levels flag."""
def serialize(self, value):
if isinstance(value, str):
return value
return ','.join(
'{}:{}'.format(name, level) for name, level in value.items())
class _StderrthresholdFlag(flags.Flag):
"""Flag class for --stderrthreshold."""
def __init__(self, *args, **kwargs):
super(_StderrthresholdFlag, self).__init__(
flags.ArgumentParser(),
flags.ArgumentSerializer(),
*args, **kwargs)
@property
def value(self):
return self._value
@value.setter
def value(self, v):
if v in _CPP_LEVEL_TO_NAMES:
# --stderrthreshold also accepts numeric strings whose values are
# Abseil C++ log levels.
cpp_value = int(v)
v = _CPP_LEVEL_TO_NAMES[v] # Normalize to strings.
elif v.lower() in _CPP_NAME_TO_LEVELS:
v = v.lower()
if v == 'warn':
v = 'warning' # Use 'warning' as the canonical name.
cpp_value = int(_CPP_NAME_TO_LEVELS[v])
else:
raise ValueError(
'--stderrthreshold must be one of (case-insensitive) '
"'debug', 'info', 'warning', 'error', 'fatal', "
"or '0', '1', '2', '3', not '%s'" % v)
self._value = v
flags.DEFINE_boolean('logtostderr',
False,
'Should only log to stderr?', allow_override_cpp=True)
flags.DEFINE_boolean('alsologtostderr',
False,
'also log to stderr?', allow_override_cpp=True)
flags.DEFINE_string('log_dir',
os.getenv('TEST_TMPDIR', ''),
'directory to write logfiles into',
allow_override_cpp=True)
flags.DEFINE_flag(_VerbosityFlag(
'verbosity', -1,
'Logging verbosity level. Messages logged at this level or lower will '
'be included. Set to 1 for debug logging. If the flag was not set or '
'supplied, the value will be changed from the default of -1 (warning) to '
'0 (info) after flags are parsed.',
short_name='v', allow_hide_cpp=True))
flags.DEFINE_flag(
_LoggerLevelsFlag(
'logger_levels', {},
'Specify log level of loggers. The format is a CSV list of '
'`name:level`. Where `name` is the logger name used with '
'`logging.getLogger()`, and `level` is a level name (INFO, DEBUG, '
'etc). e.g. `myapp.foo:INFO,other.logger:DEBUG`'))
flags.DEFINE_flag(_StderrthresholdFlag(
'stderrthreshold', 'fatal',
'log messages at this level, or more severe, to stderr in '
'addition to the logfile. Possible values are '
"'debug', 'info', 'warning', 'error', and 'fatal'. "
'Obsoletes --alsologtostderr. Using --alsologtostderr '
'cancels the effect of this flag. Please also note that '
'this flag is subject to --verbosity and requires logfile '
'not be stderr.', allow_hide_cpp=True))
flags.DEFINE_boolean('showprefixforinfo', True,
'If False, do not prepend prefix to info messages '
'when it\'s logged to stderr, '
'--verbosity is set to INFO level, '
'and python logging is used.')
def get_verbosity():
"""Returns the logging verbosity."""
return FLAGS['verbosity'].value
def set_verbosity(v):
"""Sets the logging verbosity.
Causes all messages of level <= v to be logged,
and all messages of level > v to be silently discarded.
Args:
v: int|str, the verbosity level as an integer or string. Legal string values
are those that can be coerced to an integer as well as case-insensitive
'debug', 'info', 'warning', 'error', and 'fatal'.
"""
try:
new_level = int(v)
except ValueError:
new_level = converter.ABSL_NAMES[v.upper()]
FLAGS.verbosity = new_level
def set_stderrthreshold(s):
"""Sets the stderr threshold to the value passed in.
Args:
s: str|int, valid strings values are case-insensitive 'debug',
'info', 'warning', 'error', and 'fatal'; valid integer values are
logging.DEBUG|INFO|WARNING|ERROR|FATAL.
Raises:
ValueError: Raised when s is an invalid value.
"""
if s in converter.ABSL_LEVELS:
FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
FLAGS.stderrthreshold = s
else:
raise ValueError(
'set_stderrthreshold only accepts integer absl logging level '
'from -3 to 1, or case-insensitive string values '
"'debug', 'info', 'warning', 'error', and 'fatal'. "
'But found "{}" ({}).'.format(s, type(s)))
def fatal(msg, *args, **kwargs):
# type: (Any, Any, Any) -> NoReturn
"""Logs a fatal message."""
log(FATAL, msg, *args, **kwargs)
def error(msg, *args, **kwargs):
"""Logs an error message."""
log(ERROR, msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""Logs a warning message."""
log(WARNING, msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
"""Deprecated, use 'warning' instead."""
warnings.warn("The 'warn' function is deprecated, use 'warning' instead",
DeprecationWarning, 2)
log(WARNING, msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""Logs an info message."""
log(INFO, msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""Logs a debug message."""
log(DEBUG, msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""Logs an exception, with traceback and message."""
error(msg, *args, **kwargs, exc_info=True)
# Counter to keep track of number of log entries per token.
_log_counter_per_token = {}
def _get_next_log_count_per_token(token):
"""Wrapper for _log_counter_per_token. Thread-safe.
Args:
token: The token for which to look up the count.
Returns:
The number of times this function has been called with
*token* as an argument (starting at 0).
"""
# Can't use a defaultdict because defaultdict isn't atomic, whereas
# setdefault is.
return next(_log_counter_per_token.setdefault(token, itertools.count()))
def log_every_n(level, msg, n, *args):
"""Logs ``msg % args`` at level 'level' once per 'n' times.
Logs the 1st call, (N+1)st call, (2N+1)st call, etc.
Not threadsafe.
Args:
level: int, the absl logging level at which to log.
msg: str, the message to be logged.
n: int, the number of times this should be called before it is logged.
*args: The args to be substituted into the msg.
"""
count = _get_next_log_count_per_token(get_absl_logger().findCaller())
log_if(level, msg, not (count % n), *args)
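# Worked example: with n=3 the per-site counts 0, 3, 6, ... satisfy
# `not (count % n)`, so the 1st, 4th, 7th, ... calls from a given call site
# are the ones actually logged.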
# Keeps track of the last log time of the given token.
# Note: must be a dict since set/get is atomic in CPython.
# Note: entries are never released as their number is expected to be low.
_log_timer_per_token = {}
def _seconds_have_elapsed(token, num_seconds):
"""Tests if 'num_seconds' have passed since 'token' was requested.
Not strictly thread-safe - may log with the wrong frequency if called
concurrently from multiple threads. Accuracy depends on resolution of
'timeit.default_timer()'.
Always returns True on the first call for a given 'token'.
Args:
token: The token for which to look up the count.
num_seconds: The number of seconds to test for.
Returns:
Whether it has been >= 'num_seconds' since 'token' was last requested.
"""
now = timeit.default_timer()
then = _log_timer_per_token.get(token, None)
if then is None or (now - then) >= num_seconds:
_log_timer_per_token[token] = now
return True
else:
return False
def log_every_n_seconds(level, msg, n_seconds, *args):
"""Logs ``msg % args`` at level ``level`` iff ``n_seconds`` elapsed since last call.
Logs the first call, logs subsequent calls if 'n' seconds have elapsed since
the last logging call from the same call site (file + line). Not thread-safe.
Args:
level: int, the absl logging level at which to log.
msg: str, the message to be logged.
n_seconds: float or int, seconds which should elapse before logging again.
*args: The args to be substituted into the msg.
"""
should_log = _seconds_have_elapsed(get_absl_logger().findCaller(), n_seconds)
log_if(level, msg, should_log, *args)
def log_first_n(level, msg, n, *args):
"""Logs ``msg % args`` at level ``level`` only first ``n`` times.
Not threadsafe.
Args:
level: int, the absl logging level at which to log.
msg: str, the message to be logged.
n: int, the maximal number of times the message is logged.
*args: The args to be substituted into the msg.
"""
count = _get_next_log_count_per_token(get_absl_logger().findCaller())
log_if(level, msg, count < n, *args)
def log_if(level, msg, condition, *args):
"""Logs ``msg % args`` at level ``level`` only if condition is fulfilled."""
if condition:
log(level, msg, *args)
def log(level, msg, *args, **kwargs):
"""Logs ``msg % args`` at absl logging level ``level``.
  If no args are given, just print msg, ignoring any interpolation specifiers.
Args:
level: int, the absl logging level at which to log the message
(logging.DEBUG|INFO|WARNING|ERROR|FATAL). While some C++ verbose logging
level constants are also supported, callers should prefer explicit
logging.vlog() calls for such purpose.
msg: str, the message to be logged.
*args: The args to be substituted into the msg.
**kwargs: May contain exc_info to add exception traceback to message.
"""
if level > converter.ABSL_DEBUG:
# Even though this function supports level that is greater than 1, users
# should use logging.vlog instead for such cases.
# Treat this as vlog, 1 is equivalent to DEBUG.
standard_level = converter.STANDARD_DEBUG - (level - 1)
else:
if level < converter.ABSL_FATAL:
level = converter.ABSL_FATAL
standard_level = converter.absl_to_standard(level)
# Match standard logging's behavior. Before use_absl_handler() and
# logging is configured, there is no handler attached on _absl_logger nor
  # logging.root. So logs go nowhere.
if not logging.root.handlers:
logging.basicConfig()
_absl_logger.log(standard_level, msg, *args, **kwargs)
def vlog(level, msg, *args, **kwargs):
"""Log ``msg % args`` at C++ vlog level ``level``.
Args:
level: int, the C++ verbose logging level at which to log the message,
e.g. 1, 2, 3, 4... While absl level constants are also supported,
callers should prefer logging.log|debug|info|... calls for such purpose.
msg: str, the message to be logged.
*args: The args to be substituted into the msg.
**kwargs: May contain exc_info to add exception traceback to message.
"""
log(level, msg, *args, **kwargs)
def vlog_is_on(level):
"""Checks if vlog is enabled for the given level in caller's source file.
Args:
level: int, the C++ verbose logging level at which to log the message,
e.g. 1, 2, 3, 4... While absl level constants are also supported,
callers should prefer level_debug|level_info|... calls for
checking those.
Returns:
True if logging is turned on for that level.
"""
if level > converter.ABSL_DEBUG:
# Even though this function supports level that is greater than 1, users
# should use logging.vlog instead for such cases.
# Treat this as vlog, 1 is equivalent to DEBUG.
standard_level = converter.STANDARD_DEBUG - (level - 1)
else:
if level < converter.ABSL_FATAL:
level = converter.ABSL_FATAL
standard_level = converter.absl_to_standard(level)
return _absl_logger.isEnabledFor(standard_level)
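# Worked example (follows from the conversion above): after set_verbosity(2)
# the effective standard level is DEBUG - 1, so vlog_is_on(2) returns True
# while vlog_is_on(3) returns False.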
def flush():
"""Flushes all log files."""
get_absl_handler().flush()
def level_debug():
"""Returns True if debug logging is turned on."""
return get_verbosity() >= DEBUG
def level_info():
"""Returns True if info logging is turned on."""
return get_verbosity() >= INFO
def level_warning():
"""Returns True if warning logging is turned on."""
return get_verbosity() >= WARNING
level_warn = level_warning # Deprecated function.
def level_error():
"""Returns True if error logging is turned on."""
return get_verbosity() >= ERROR
def get_log_file_name(level=INFO):
"""Returns the name of the log file.
  For Python logging, only one file is used and level is ignored. Returns an
  empty string if it logs to stderr/stdout or the log stream has no `name`
  attribute.
Args:
level: int, the absl.logging level.
Raises:
ValueError: Raised when `level` has an invalid value.
"""
if level not in converter.ABSL_LEVELS:
raise ValueError('Invalid absl.logging level {}'.format(level))
stream = get_absl_handler().python_handler.stream
if (stream == sys.stderr or stream == sys.stdout or
not hasattr(stream, 'name')):
return ''
else:
return stream.name
def find_log_dir_and_names(program_name=None, log_dir=None):
"""Computes the directory and filename prefix for log file.
Args:
program_name: str|None, the filename part of the path to the program that
is running without its extension. e.g: if your program is called
      ``usr/bin/foobar.py`` this method should probably be called with
      ``program_name='foobar'``. However, this is just a convention; you can
      pass in any string you want, and it will be used as part of the
      log filename. If you don't pass in anything, the default behavior
      is as described in the example. In Python standard logging mode, the
      program name is prefixed with ``py_`` when the ``program_name``
      argument is omitted.
log_dir: str|None, the desired log directory.
Returns:
(log_dir, file_prefix, symlink_prefix)
Raises:
FileNotFoundError: raised in Python 3 when it cannot find a log directory.
OSError: raised in Python 2 when it cannot find a log directory.
"""
if not program_name:
# Strip the extension (foobar.par becomes foobar, and
# fubar.py becomes fubar). We do this so that the log
# file names are similar to C++ log file names.
program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
# Prepend py_ to files so that python code gets a unique file, and
# so that C++ libraries do not try to write to the same log files as us.
program_name = 'py_%s' % program_name
actual_log_dir = find_log_dir(log_dir=log_dir)
try:
username = getpass.getuser()
except KeyError:
# This can happen, e.g. when running under docker w/o passwd file.
if hasattr(os, 'getuid'):
# Windows doesn't have os.getuid
username = str(os.getuid())
else:
username = 'unknown'
hostname = socket.gethostname()
file_prefix = '%s.%s.%s.log' % (program_name, hostname, username)
return actual_log_dir, file_prefix, program_name
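# Worked example (hypothetical values): for sys.argv[0] == '/usr/bin/foobar.py'
# run by user 'alice' on host 'devbox' with --log_dir=/tmp, this returns
# ('/tmp', 'py_foobar.devbox.alice.log', 'py_foobar').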
def find_log_dir(log_dir=None):
"""Returns the most suitable directory to put log files into.
Args:
log_dir: str|None, if specified, the logfile(s) will be created in that
directory. Otherwise if the --log_dir command-line flag is provided,
the logfile will be created in that directory. Otherwise the logfile
will be created in a standard location.
Raises:
FileNotFoundError: raised in Python 3 when it cannot find a log directory.
OSError: raised in Python 2 when it cannot find a log directory.
"""
# Get a list of possible log dirs (will try to use them in order).
if log_dir:
# log_dir was explicitly specified as an arg, so use it and it alone.
dirs = [log_dir]
elif FLAGS['log_dir'].value:
# log_dir flag was provided, so use it and it alone (this mimics the
# behavior of the same flag in logging.cc).
dirs = [FLAGS['log_dir'].value]
else:
dirs = ['/tmp/', './']
# Find the first usable log dir.
for d in dirs:
if os.path.isdir(d) and os.access(d, os.W_OK):
return d
raise FileNotFoundError(
"Can't find a writable directory for logs, tried %s" % dirs)
def get_absl_log_prefix(record):
"""Returns the absl log prefix for the log record.
Args:
record: logging.LogRecord, the record to get prefix for.
"""
created_tuple = time.localtime(record.created)
created_microsecond = int(record.created % 1.0 * 1e6)
critical_prefix = ''
level = record.levelno
if _is_non_absl_fatal_record(record):
# When the level is FATAL, but not logged from absl, lower the level so
# it's treated as ERROR.
level = logging.ERROR
critical_prefix = _CRITICAL_PREFIX
severity = converter.get_initial_for_level(level)
return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % (
severity,
created_tuple.tm_mon,
created_tuple.tm_mday,
created_tuple.tm_hour,
created_tuple.tm_min,
created_tuple.tm_sec,
created_microsecond,
_get_thread_id(),
record.filename,
record.lineno,
critical_prefix)
def skip_log_prefix(func):
"""Skips reporting the prefix of a given function or name by :class:`~absl.logging.ABSLLogger`.
This is a convenience wrapper function / decorator for
:meth:`~absl.logging.ABSLLogger.register_frame_to_skip`.
If a callable function is provided, only that function will be skipped.
If a function name is provided, all functions with the same name in the
file that this is called in will be skipped.
This can be used as a decorator of the intended function to be skipped.
Args:
func: Callable function or its name as a string.
Returns:
func (the input, unchanged).
Raises:
ValueError: The input is callable but does not have a function code object.
TypeError: The input is neither callable nor a string.
"""
if callable(func):
func_code = getattr(func, '__code__', None)
if func_code is None:
raise ValueError('Input callable does not have a function code object.')
file_name = func_code.co_filename
func_name = func_code.co_name
func_lineno = func_code.co_firstlineno
elif isinstance(func, str):
file_name = get_absl_logger().findCaller()[0]
func_name = func
func_lineno = None
else:
raise TypeError('Input is neither callable nor a string.')
ABSLLogger.register_frame_to_skip(file_name, func_name, func_lineno)
return func
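# Usage sketch (the wrapper name `_log_helper` is illustrative): frames of
# the decorated function are skipped when ABSLLogger walks the stack to
# attribute a file:line to the log record.
#
#   @skip_log_prefix
#   def _log_helper(msg):
#     info(msg)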
def _is_non_absl_fatal_record(log_record):
return (log_record.levelno >= logging.FATAL and
not log_record.__dict__.get(_ABSL_LOG_FATAL, False))
def _is_absl_fatal_record(log_record):
return (log_record.levelno >= logging.FATAL and
log_record.__dict__.get(_ABSL_LOG_FATAL, False))
# Indicates if we still need to warn about pre-init logs going to stderr.
_warn_preinit_stderr = True
class PythonHandler(logging.StreamHandler):
"""The handler class used by Abseil Python logging implementation."""
def __init__(self, stream=None, formatter=None):
super(PythonHandler, self).__init__(stream)
self.setFormatter(formatter or PythonFormatter())
def start_logging_to_file(self, program_name=None, log_dir=None):
"""Starts logging messages to files instead of standard error."""
FLAGS.logtostderr = False
actual_log_dir, file_prefix, symlink_prefix = find_log_dir_and_names(
program_name=program_name, log_dir=log_dir)
basename = '%s.INFO.%s.%d' % (
file_prefix,
time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time())),
os.getpid())
filename = os.path.join(actual_log_dir, basename)
self.stream = open(filename, 'a', encoding='utf-8')
# os.symlink is not available on Windows Python 2.
if getattr(os, 'symlink', None):
# Create a symlink to the log file with a canonical name.
symlink = os.path.join(actual_log_dir, symlink_prefix + '.INFO')
try:
if os.path.islink(symlink):
os.unlink(symlink)
os.symlink(os.path.basename(filename), symlink)
except EnvironmentError:
# If it fails, we're sad but it's no error. Commonly, this
# fails because the symlink was created by another user and so
        # we can't modify it.
pass
def use_absl_log_file(self, program_name=None, log_dir=None):
"""Conditionally logs to files, based on --logtostderr."""
if FLAGS['logtostderr'].value:
self.stream = sys.stderr
else:
self.start_logging_to_file(program_name=program_name, log_dir=log_dir)
def flush(self):
"""Flushes all log files."""
self.acquire()
try:
self.stream.flush()
except (EnvironmentError, ValueError):
# A ValueError is thrown if we try to flush a closed file.
pass
finally:
self.release()
def _log_to_stderr(self, record):
"""Emits the record to stderr.
This temporarily sets the handler stream to stderr, calls
StreamHandler.emit, then reverts the stream back.
Args:
record: logging.LogRecord, the record to log.
"""
# emit() is protected by a lock in logging.Handler, so we don't need to
# protect here again.
old_stream = self.stream
self.stream = sys.stderr
try:
super(PythonHandler, self).emit(record)
finally:
self.stream = old_stream
def emit(self, record):
"""Prints a record out to some streams.
1. If ``FLAGS.logtostderr`` is set, it will print to ``sys.stderr`` ONLY.
2. If ``FLAGS.alsologtostderr`` is set, it will print to ``sys.stderr``.
3. If ``FLAGS.logtostderr`` is not set, it will log to the stream
associated with the current thread.
Args:
record: :class:`logging.LogRecord`, the record to emit.
"""
# People occasionally call logging functions at import time before
# our flags may have even been defined yet, let alone even parsed, as we
# rely on the C++ side to define some flags for us and app init to
    # deal with parsing. Match the C++ library behavior of notifying the user
    # and emitting such messages to stderr. It encourages people to clean up
    # and does not hide the message.
level = record.levelno
if not FLAGS.is_parsed(): # Also implies "before flag has been defined".
global _warn_preinit_stderr
if _warn_preinit_stderr:
sys.stderr.write(
'WARNING: Logging before flag parsing goes to stderr.\n')
_warn_preinit_stderr = False
self._log_to_stderr(record)
elif FLAGS['logtostderr'].value:
self._log_to_stderr(record)
else:
super(PythonHandler, self).emit(record)
stderr_threshold = converter.string_to_standard(
FLAGS['stderrthreshold'].value)
if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and
self.stream != sys.stderr):
self._log_to_stderr(record)
# Die when the record is created from ABSLLogger and level is FATAL.
if _is_absl_fatal_record(record):
self.flush() # Flush the log before dying.
# In threaded python, sys.exit() from a non-main thread only
# exits the thread in question.
os.abort()
def close(self):
"""Closes the stream to which we are writing."""
self.acquire()
try:
self.flush()
try:
# Do not close the stream if it's sys.stderr|stdout. They may be
# redirected or overridden to files, which should be managed by users
# explicitly.
user_managed = sys.stderr, sys.stdout, sys.__stderr__, sys.__stdout__
if self.stream not in user_managed and (
not hasattr(self.stream, 'isatty') or not self.stream.isatty()):
self.stream.close()
except ValueError:
# A ValueError is thrown if we try to run isatty() on a closed file.
pass
super(PythonHandler, self).close()
finally:
self.release()
class ABSLHandler(logging.Handler):
"""Abseil Python logging module's log handler."""
def __init__(self, python_logging_formatter):
super(ABSLHandler, self).__init__()
self._python_handler = PythonHandler(formatter=python_logging_formatter)
self.activate_python_handler()
def format(self, record):
return self._current_handler.format(record)
def setFormatter(self, fmt):
self._current_handler.setFormatter(fmt)
def emit(self, record):
self._current_handler.emit(record)
def flush(self):
self._current_handler.flush()
def close(self):
super(ABSLHandler, self).close()
self._current_handler.close()
def handle(self, record):
rv = self.filter(record)
if rv:
return self._current_handler.handle(record)
return rv
@property
def python_handler(self):
return self._python_handler
def activate_python_handler(self):
"""Uses the Python logging handler as the current logging handler."""
self._current_handler = self._python_handler
def use_absl_log_file(self, program_name=None, log_dir=None):
self._current_handler.use_absl_log_file(program_name, log_dir)
def start_logging_to_file(self, program_name=None, log_dir=None):
self._current_handler.start_logging_to_file(program_name, log_dir)
class PythonFormatter(logging.Formatter):
"""Formatter class used by :class:`~absl.logging.PythonHandler`."""
def format(self, record):
"""Appends the message from the record to the results of the prefix.
Args:
record: logging.LogRecord, the record to be formatted.
Returns:
The formatted string representing the record.
"""
if (not FLAGS['showprefixforinfo'].value and
FLAGS['verbosity'].value == converter.ABSL_INFO and
record.levelno == logging.INFO and
_absl_handler.python_handler.stream == sys.stderr):
prefix = ''
else:
prefix = get_absl_log_prefix(record)
return prefix + super(PythonFormatter, self).format(record)
class ABSLLogger(logging.getLoggerClass()):
"""A logger that will create LogRecords while skipping some stack frames.
This class maintains an internal list of filenames and method names
for use when determining who called the currently executing stack
frame. Any method names from specific source files are skipped when
walking backwards through the stack.
Client code should use the register_frame_to_skip method to let the
ABSLLogger know which method from which file should be
excluded from the walk backwards through the stack.
"""
_frames_to_skip = set()
def findCaller(self, stack_info=False, stacklevel=1):
"""Finds the frame of the calling method on the stack.
This method skips any frames registered with the
ABSLLogger and any methods from this file, and whatever
method is currently being used to generate the prefix for the log
line. Then it returns the file name, line number, and method name
    of the calling method. An optional fourth item may be returned;
    callers who only need things from the first three are advised to
    always slice or index the result rather than using direct unpacking
    assignment.
Args:
stack_info: bool, when True, include the stack trace as a fourth item
returned. On Python 3 there are always four items returned - the
fourth will be None when this is False. On Python 2 the stdlib
base class API only returns three items. We do the same when this
new parameter is unspecified or False for compatibility.
Returns:
(filename, lineno, methodname[, sinfo]) of the calling method.
"""
f_to_skip = ABSLLogger._frames_to_skip
# Use sys._getframe(2) instead of logging.currentframe(), it's slightly
# faster because there is one less frame to traverse.
frame = sys._getframe(2) # pylint: disable=protected-access
while frame:
code = frame.f_code
if (_LOGGING_FILE_PREFIX not in code.co_filename and
(code.co_filename, code.co_name,
code.co_firstlineno) not in f_to_skip and
(code.co_filename, code.co_name) not in f_to_skip):
sinfo = None
if stack_info:
out = io.StringIO()
out.write(u'Stack (most recent call last):\n')
traceback.print_stack(frame, file=out)
sinfo = out.getvalue().rstrip(u'\n')
return (code.co_filename, frame.f_lineno, code.co_name, sinfo)
frame = frame.f_back
def critical(self, msg, *args, **kwargs):
"""Logs ``msg % args`` with severity ``CRITICAL``."""
self.log(logging.CRITICAL, msg, *args, **kwargs)
def fatal(self, msg, *args, **kwargs):
"""Logs ``msg % args`` with severity ``FATAL``."""
self.log(logging.FATAL, msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""Logs ``msg % args`` with severity ``ERROR``."""
self.log(logging.ERROR, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
"""Logs ``msg % args`` with severity ``WARN``."""
warnings.warn("The 'warn' method is deprecated, use 'warning' instead",
DeprecationWarning, 2)
self.log(logging.WARN, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""Logs ``msg % args`` with severity ``WARNING``."""
self.log(logging.WARNING, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""Logs ``msg % args`` with severity ``INFO``."""
self.log(logging.INFO, msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
"""Logs ``msg % args`` with severity ``DEBUG``."""
self.log(logging.DEBUG, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""Logs a message at a cetain level substituting in the supplied arguments.
This method behaves differently in python and c++ modes.
Args:
level: int, the standard logging level at which to log the message.
msg: str, the text of the message to log.
*args: The arguments to substitute in the message.
**kwargs: The keyword arguments to substitute in the message.
"""
if level >= logging.FATAL:
# Add property to the LogRecord created by this logger.
# This will be used by the ABSLHandler to determine whether it should
# treat CRITICAL/FATAL logs as really FATAL.
extra = kwargs.setdefault('extra', {})
extra[_ABSL_LOG_FATAL] = True
super(ABSLLogger, self).log(level, msg, *args, **kwargs)
def handle(self, record):
"""Calls handlers without checking ``Logger.disabled``.
Non-root loggers are set to disabled after setup with :func:`logging.config`
    if it's not explicitly specified. Historically, absl logging is not
    disabled by that. To maintain this behavior, this function skips
checking the ``Logger.disabled`` bit.
This logger can still be disabled by adding a filter that filters out
everything.
Args:
record: logging.LogRecord, the record to handle.
"""
if self.filter(record):
self.callHandlers(record)
@classmethod
def register_frame_to_skip(cls, file_name, function_name, line_number=None):
"""Registers a function name to skip when walking the stack.
The :class:`~absl.logging.ABSLLogger` sometimes skips method calls on the
stack to make the log messages meaningful in their appropriate context.
This method registers a function from a particular file as one
which should be skipped.
Args:
file_name: str, the name of the file that contains the function.
function_name: str, the name of the function to skip.
line_number: int, if provided, only the function with this starting line
number will be skipped. Otherwise, all functions with the same name
in the file will be skipped.
"""
if line_number is not None:
cls._frames_to_skip.add((file_name, function_name, line_number))
else:
cls._frames_to_skip.add((file_name, function_name))
def _get_thread_id():
"""Gets id of current thread, suitable for logging as an unsigned quantity.
If pywrapbase is linked, returns GetTID() for the thread ID to be
consistent with C++ logging. Otherwise, returns the numeric thread id.
  The quantity is made unsigned by masking with ``_THREAD_ID_MASK``.
Returns:
Thread ID unique to this process (unsigned)
"""
thread_id = threading.get_ident()
return thread_id & _THREAD_ID_MASK
def get_absl_logger():
"""Returns the absl logger instance."""
return _absl_logger
def get_absl_handler():
"""Returns the absl handler instance."""
return _absl_handler
def use_python_logging(quiet=False):
"""Uses the python implementation of the logging code.
Args:
quiet: No logging message about switching logging type.
"""
get_absl_handler().activate_python_handler()
if not quiet:
info('Restoring pure python logging')
_attempted_to_remove_stderr_stream_handlers = False
def use_absl_handler():
"""Uses the ABSL logging handler for logging.
This method is called in :func:`app.run()<absl.app.run>` so the absl handler
is used in absl apps.
"""
global _attempted_to_remove_stderr_stream_handlers
if not _attempted_to_remove_stderr_stream_handlers:
# The absl handler logs to stderr by default. To prevent double logging to
# stderr, the following code tries its best to remove other handlers that
# emit to stderr. Those handlers are most commonly added when
# logging.info/debug is called before calling use_absl_handler().
handlers = [
h for h in logging.root.handlers
if isinstance(h, logging.StreamHandler) and h.stream == sys.stderr]
for h in handlers:
logging.root.removeHandler(h)
_attempted_to_remove_stderr_stream_handlers = True
absl_handler = get_absl_handler()
if absl_handler not in logging.root.handlers:
logging.root.addHandler(absl_handler)
FLAGS['verbosity']._update_logging_levels() # pylint: disable=protected-access
FLAGS['logger_levels']._update_logger_levels() # pylint: disable=protected-access
def _initialize():
"""Initializes loggers and handlers."""
global _absl_logger, _absl_handler
if _absl_logger:
return
original_logger_class = logging.getLoggerClass()
logging.setLoggerClass(ABSLLogger)
_absl_logger = logging.getLogger('absl')
logging.setLoggerClass(original_logger_class)
python_logging_formatter = PythonFormatter()
_absl_handler = ABSLHandler(python_logging_formatter)
_initialize()
|
{
"content_hash": "aa78a809eb3b28452e71557ba8d0bacf",
"timestamp": "",
"source": "github",
"line_count": 1226,
"max_line_length": 97,
"avg_line_length": 33.296900489396414,
"alnum_prop": 0.670006369114693,
"repo_name": "abseil/abseil-py",
"id": "c0ba4b0fa245fe01bb4cdf9872a96836afb1e781",
"size": "41407",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "absl/logging/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1036477"
},
{
"name": "Shell",
"bytes": "2438"
},
{
"name": "Starlark",
"bytes": "16966"
}
],
"symlink_target": ""
}
|
"""The datastore models for histograms and diagnostics."""
import json
import sys
from google.appengine.ext import ndb
from dashboard.models import graph_data
from dashboard.models import internal_only_model
class JsonModel(internal_only_model.InternalOnlyModel):
# Similarly to Row, we don't need to memcache these as we don't expect to
# access them repeatedly.
_use_memcache = False
data = ndb.JsonProperty()
test = ndb.KeyProperty(graph_data.TestMetadata)
internal_only = ndb.BooleanProperty(default=False, indexed=True)
class Histogram(JsonModel):
# Needed for timeseries queries (e.g. for alerting).
revision = ndb.IntegerProperty(indexed=True)
class SparseDiagnostic(JsonModel):
  # Needed for intersecting range queries.
name = ndb.StringProperty(indexed=False)
start_revision = ndb.IntegerProperty(indexed=True)
end_revision = ndb.IntegerProperty(indexed=True)
@staticmethod
def GetMostRecentValuesByNames(test_key, diagnostic_names):
"""Gets the data in the latests sparse diagnostics with the given
set of diagnostic names.
Args:
      test_key: The TestMetadata key to look up the diagnostics by.
      diagnostic_names: Set of the names of the diagnostics to look up.
Returns:
A dictionary where the keys are the given names, and the values are the
corresponding diagnostics' values.
None if no diagnostics are found with the given keys or type.
"""
diagnostics = SparseDiagnostic.query(
ndb.AND(SparseDiagnostic.end_revision == sys.maxint,
SparseDiagnostic.test == test_key)).fetch()
diagnostic_map = {}
for diagnostic in diagnostics:
if diagnostic.name in diagnostic_names:
assert diagnostic_map.get(diagnostic.name) is None
diagnostic_data = json.loads(diagnostic.data)
diagnostic_map[diagnostic.name] = diagnostic_data.get('values')
return diagnostic_map
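  # Usage sketch (names and values are illustrative):
  #   SparseDiagnostic.GetMostRecentValuesByNames(
  #       test_key, {'owners', 'bug_components'})
  #   -> {'owners': ['foo@example.com'], 'bug_components': ['Some>Component']}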
|
{
"content_hash": "87e26fdd7af671f502ac7b0c9ea54c5b",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 33.03448275862069,
"alnum_prop": 0.7317327766179541,
"repo_name": "catapult-project/catapult-csm",
"id": "ae81d148a5d7744522b9fa1c4e0dfdedeb2b2d01",
"size": "2079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashboard/dashboard/models/histogram.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43728"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "80325"
},
{
"name": "HTML",
"bytes": "11817766"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6207634"
},
{
"name": "Shell",
"bytes": "2558"
}
],
"symlink_target": ""
}
|
import json
import logging
import os
import platform
import typing
from io import TextIOWrapper
# Data class for a running step.
class _RunningPart:
def __init__(self, event_id: int, name: str, desc: str) -> None:
self.event_id = event_id
self.name = name
self.desc = desc
# Handle connection to buck. Manage running parts (assumed to have stack discipline).
class BuckConnection:
EVENT_TYPE_STEP = '{"eventType": "STEP_EVENT"}'
def __init__(self) -> None:
self.has_buck: typing.Optional[bool] = None
self.action_id = ""
self.event_id = 0
self.event_pipe: typing.Optional[TextIOWrapper] = None
self.running_parts: typing.List[_RunningPart] = []
def connect(self) -> None:
assert self.has_buck is None
if (
"BUCK_EVENT_PIPE" not in os.environ
or "BUCK_EVENT_PIPE" not in os.environ
or "BUCK_ACTION_ID" not in os.environ
# TODO(T103482589) Work around an issue on macs.
or platform.system() == "Darwin"
):
self.has_buck = False
return
self.has_buck = True
self.action_id = os.environ["BUCK_ACTION_ID"]
try:
self.__open_pipe()
self.__init_message()
except BaseException as e:
logging.warning("Failed to connect to buck: %s", e)
self.has_buck = False
def is_connected(self) -> bool:
return self.has_buck is not None
def disconnect(self) -> None:
local = self.event_pipe
if local:
local.close()
def __open_pipe(self) -> None:
event_path = os.path.abspath(os.environ["BUCK_EVENT_PIPE"])
# Need to go low-level for non-blocking connection.
fd = os.open(event_path, os.O_WRONLY | os.O_NONBLOCK)
if fd < 0:
raise RuntimeError(f"Could not open pipe to {event_path}")
self.event_pipe = open(fd, mode="w") # noqa(P201)
def __init_message(self) -> None:
local = self.event_pipe
assert local
local.write("j")
local.write(os.linesep)
local.flush()
def __create_step_message(self, event: _RunningPart, status: str) -> str:
message = {
"event_id": event.event_id,
"step_status": status,
"step_type": event.name,
"description": event.desc,
"action_id": self.action_id,
}
return json.dumps(message)
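    # Example payload (field values are illustrative):
    #   {"event_id": 0, "step_status": "STARTED", "step_type": "redex::step",
    #    "description": "Unpacking APK", "action_id": "..."}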
def __send_step(self, event: _RunningPart, status: str) -> None:
message = self.__create_step_message(event, status)
local = self.event_pipe
if not local:
return
try:
local.write(str(len(BuckConnection.EVENT_TYPE_STEP)))
local.write(os.linesep)
local.write(BuckConnection.EVENT_TYPE_STEP)
local.write(str(len(message)))
local.write(os.linesep)
local.write(message)
local.flush()
except (BrokenPipeError, BlockingIOError) as e:
logging.error("Buck pipe is broken! %s", e)
self.has_buck = False
self.event_pipe = None
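    # Wire format sketch, as written above: the length of the type header, a
    # newline, the type header itself, then the length of the payload, a
    # newline, and the payload. Note there is no separator between the type
    # header and the payload length.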
def start(self, name: str, desc: str) -> None:
if not self.has_buck:
return
part = _RunningPart(self.event_id, name, desc)
self.event_id += 1
self.__send_step(part, "STARTED")
self.running_parts.append(part)
def end(self) -> None:
if not self.has_buck:
return
if not self.running_parts:
return
part = self.running_parts.pop()
self.__send_step(part, "FINISHED")
def size(self) -> int:
return len(self.running_parts)
def end_all(self, down_to: typing.Optional[int] = None) -> None:
if not self.has_buck:
return
left = 0 if not down_to else max(0, down_to)
while len(self.running_parts) > left:
part = self.running_parts.pop()
self.__send_step(part, "FINISHED")
_BUCK_CONNECTION = BuckConnection()
def get_buck_connection() -> BuckConnection:
return _BUCK_CONNECTION
class BuckConnectionScope:
def __init__(self) -> None:
self.was_connected = False
self.num_parts = 0
def __enter__(self) -> BuckConnection:
self.was_connected = _BUCK_CONNECTION.is_connected()
if not self.was_connected:
_BUCK_CONNECTION.connect()
self.num_parts = _BUCK_CONNECTION.size()
return _BUCK_CONNECTION
def __exit__(self, *args: typing.Any) -> None:
_BUCK_CONNECTION.end_all(self.num_parts)
if not self.was_connected:
_BUCK_CONNECTION.disconnect()
class BuckPartScope:
def __init__(self, name: str, desc: str) -> None:
self.name = name
self.desc = desc
def __enter__(self) -> None:
_BUCK_CONNECTION.start(self.name, self.desc)
def __exit__(self, *args: typing.Any) -> None:
_BUCK_CONNECTION.end()
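# Usage sketch (step names are illustrative):
#   with BuckConnectionScope():
#       with BuckPartScope("redex::unpack", "Unpacking APK"):
#           ...  # work in this block is reported to buck as a STEP_EVENT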
|
{
"content_hash": "416821343b414c0b927b7108144c2fb5",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 85,
"avg_line_length": 28.942528735632184,
"alnum_prop": 0.5702938840349484,
"repo_name": "facebook/redex",
"id": "d080acc2cc207e9cdf006676d786f7d2473aa58e",
"size": "5256",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pyredex/buck.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1855"
},
{
"name": "C",
"bytes": "45691"
},
{
"name": "C++",
"bytes": "10822879"
},
{
"name": "CMake",
"bytes": "36765"
},
{
"name": "CSS",
"bytes": "2259"
},
{
"name": "Dockerfile",
"bytes": "232"
},
{
"name": "Java",
"bytes": "663048"
},
{
"name": "JavaScript",
"bytes": "12077"
},
{
"name": "Kotlin",
"bytes": "20642"
},
{
"name": "M4",
"bytes": "64700"
},
{
"name": "Makefile",
"bytes": "50587"
},
{
"name": "Perl",
"bytes": "1532"
},
{
"name": "Python",
"bytes": "494966"
},
{
"name": "Rust",
"bytes": "192507"
},
{
"name": "Shell",
"bytes": "25367"
}
],
"symlink_target": ""
}
|
from http.client import HTTPConnection
import json
import re
import base64
import sys
import os
import os.path
settings = {}
def hex_switchEndian(s):
""" Switches the endianness of a hex string (in pairs of hex chars) """
pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
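# Worked example: hex_switchEndian('1a2b3c') -> '3c2b1a'.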
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
authpair = authpair.encode('utf-8')
self.authhdr = b"Basic " + base64.b64encode(authpair)
self.conn = HTTPConnection(host, port=port, timeout=30)
def execute(self, obj):
try:
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
except ConnectionRefusedError:
print('RPC connection refused. Check RPC settings and the server status.',
file=sys.stderr)
return None
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read().decode('utf-8')
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
if reply is None:
print('Cannot continue. Program will halt.')
return None
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
sys.exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
if settings['rev_hash_bytes'] == 'true':
resp_obj['result'] = hex_switchEndian(resp_obj['result'])
print(resp_obj['result'])
height += num_blocks
def get_rpc_cookie():
# Open the cookie file
with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r', encoding="ascii") as f:
combined = f.readline()
combined_split = combined.split(":")
settings['rpcuser'] = combined_split[0]
settings['rpcpassword'] = combined_split[1]
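# Expected CONFIG-FILE format: key=value lines, '#' comments allowed
# (values below are illustrative):
#   host=127.0.0.1
#   port=8332
#   rpcuser=alice
#   rpcpassword=hunter2
#   min_height=0
#   max_height=313000
#   rev_hash_bytes=false
# Alternatively, set datadir=<path> to read credentials from the .cookie file.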
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1], encoding="utf8")
for line in f:
# skip comment lines
        m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
        m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
use_userpass = True
use_datadir = False
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
use_userpass = False
if 'datadir' in settings and not use_userpass:
use_datadir = True
if not use_userpass and not use_datadir:
print("Missing datadir or username and/or password in cfg file", file=sys.stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
# Force hash byte format setting to be lowercase to make comparisons easier.
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
# Get the rpc user and pass from the cookie if the datadir is set
if use_datadir:
get_rpc_cookie()
get_block_hashes(settings)
|
{
"content_hash": "7d22d52d0dcbb3666ffdb067b91d6c76",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 108,
"avg_line_length": 33.62237762237762,
"alnum_prop": 0.5809068219633944,
"repo_name": "chaincoin/chaincoin",
"id": "8529470e09b8a7afa6d3a24c404e1509896e9947",
"size": "5108",
"binary": false,
"copies": "6",
"ref": "refs/heads/0.18",
"path": "contrib/linearize/linearize-hashes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "782437"
},
{
"name": "C++",
"bytes": "7493134"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "199073"
},
{
"name": "Makefile",
"bytes": "123254"
},
{
"name": "Objective-C",
"bytes": "3901"
},
{
"name": "Objective-C++",
"bytes": "5382"
},
{
"name": "Python",
"bytes": "2532944"
},
{
"name": "QMake",
"bytes": "792"
},
{
"name": "Shell",
"bytes": "94132"
}
],
"symlink_target": ""
}
|
import numpy as np
from system.hardware.tici.power_monitor import sample_power
if __name__ == '__main__':
print("measuring for 5 seconds")
for _ in range(3):
pwrs = sample_power()
print("mean %.2f std %.2f" % (np.mean(pwrs), np.std(pwrs)))
|
{
"content_hash": "3b7d8d111f27a3a31d4231fafdbb6540",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 63,
"avg_line_length": 31.625,
"alnum_prop": 0.6403162055335968,
"repo_name": "commaai/openpilot",
"id": "5d6885136792db3dc5d5c3aeb4685a64380e8b0b",
"size": "276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "system/hardware/tici/precise_power_measure.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "604924"
},
{
"name": "C++",
"bytes": "1125553"
},
{
"name": "Cython",
"bytes": "50503"
},
{
"name": "Dockerfile",
"bytes": "1239"
},
{
"name": "Emacs Lisp",
"bytes": "124"
},
{
"name": "HTML",
"bytes": "11493"
},
{
"name": "Kaitai Struct",
"bytes": "8093"
},
{
"name": "MATLAB",
"bytes": "35190"
},
{
"name": "Makefile",
"bytes": "14018"
},
{
"name": "Python",
"bytes": "2386885"
},
{
"name": "QML",
"bytes": "1132"
},
{
"name": "Shell",
"bytes": "32876"
}
],
"symlink_target": ""
}
|
"""Support for Abode Security System covers."""
import logging
import abodepy.helpers.constants as CONST
from homeassistant.components.cover import CoverDevice
from . import AbodeDevice
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Platform uses config entry setup."""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Abode cover devices."""
data = hass.data[DOMAIN]
entities = []
for device in data.abode.get_devices(generic_type=CONST.TYPE_COVER):
entities.append(AbodeCover(data, device))
async_add_entities(entities)
class AbodeCover(AbodeDevice, CoverDevice):
"""Representation of an Abode cover."""
@property
def is_closed(self):
"""Return true if cover is closed, else False."""
return not self._device.is_open
def close_cover(self, **kwargs):
"""Issue close command to cover."""
self._device.close_cover()
def open_cover(self, **kwargs):
"""Issue open command to cover."""
self._device.open_cover()
|
{
"content_hash": "0d77140bef92383a3cb57cbe936cb2f3",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 86,
"avg_line_length": 26,
"alnum_prop": 0.6794871794871795,
"repo_name": "leppa/home-assistant",
"id": "a4fce7e7b8acacf6086a27d53c6990a955ed69ff",
"size": "1170",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/abode/cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18957740"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import logging
from rest_framework import generics
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from django.conf import settings
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.utils import timezone
from biz.account.forms import CloudUserCreateForm
from biz.account.models import Contract, Operation, Quota, UserProxy, QUOTA_ITEM
from biz.account.serializer import ContractSerializer, OperationSerializer, UserSerializer, QuotaSerializer
from biz.account.utils import get_quota_usage
from biz.idc.models import DataCenter
from eoncloud_web.pagination import PagePagination
LOG = logging.getLogger(__name__)
def signup(request, template_name="signup.html"):
error = None
if request.method == "GET":
userCreationForm = CloudUserCreateForm()
elif request.method == "POST":
user = User()
userCreationForm = CloudUserCreateForm(data=request.POST, instance=user)
if userCreationForm.is_valid():
userCreationForm.save()
return HttpResponseRedirect(reverse("signup_success"))
        if "__all__" in userCreationForm.errors:
error = userCreationForm.errors['__all__']
else:
error = userCreationForm.errors
return render_to_response(template_name, RequestContext(request, {
"MCC": settings.MCC,
"SOURCE": settings.SOURCE,
"USER_TYPE": settings.USER_TYPE,
"BRAND": settings.BRAND,
"userCreationForm": userCreationForm,
"error": error,
}))
def signup_success(request, template_name="signup_success.html"):
return render_to_response(template_name, RequestContext(request, {
"BRAND": settings.BRAND,
}))
def find_password(request, template_name="find_password.html"):
return render_to_response(template_name, RequestContext(request, {
"BRAND": settings.BRAND,
}))
@api_view(["GET"])
def contract_view(request):
c = Contract.objects.filter(user=request.user, udc__id=request.session["UDC_ID"])[0]
s = ContractSerializer(c)
return Response(s.data)
@api_view(["GET"])
def quota_view(request):
quota = get_quota_usage(request.user, request.session["UDC_ID"])
return Response(quota)
class OperationList(generics.ListAPIView):
queryset = Operation.objects
serializer_class = OperationSerializer
pagination_class = PagePagination
def get_queryset(self):
request = self.request
resource = request.query_params.get('resource')
resource_name = request.query_params.get('resource_name')
start_date = request.query_params.get('start_date')
end_date = request.query_params.get('end_date')
queryset = super(OperationList, self).get_queryset()
if resource:
queryset = queryset.filter(resource=resource)
if resource_name:
queryset = queryset.filter(resource_name__istartswith=resource_name)
if start_date:
queryset = queryset.filter(create_date__gte=start_date)
if end_date:
queryset = queryset.filter(create_date__lte=end_date)
if request.user.is_superuser:
data_center_pk = request.query_params.get('data_center', '')
operator_pk = request.query_params.get('operator', '')
if data_center_pk:
queryset = queryset.filter(udc__data_center__pk=data_center_pk)
if operator_pk:
queryset = queryset.filter(user__pk=operator_pk)
else:
queryset = queryset.filter(user=request.user, udc__id=request.session["UDC_ID"])
return queryset.order_by('-create_date')
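# A sketch of how the filters above combine (the URL and parameter values
# are hypothetical):
#   GET /operations?resource=instance&start_date=2015-01-01
# returns the requesting user's instance operations created on or after
# that date, newest first; superusers may additionally narrow the list by
# data_center and operator.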
@api_view()
def operation_filters(request):
resources = Operation.objects.values('resource').distinct()
for data in resources:
data['name'] = _(data['resource'])
return Response({
"resources": resources,
"operators": UserProxy.normal_users.values('pk', 'username'),
"data_centers": DataCenter.objects.values('pk', 'name')
})
class ContractList(generics.ListCreateAPIView):
queryset = Contract.living.filter(deleted=False)
serializer_class = ContractSerializer
def list(self, request, *args, **kwargs):
serializer = ContractSerializer(self.get_queryset(), many=True)
return Response(serializer.data)
class ContractDetail(generics.RetrieveAPIView):
queryset = Contract.living.all()
serializer_class = ContractSerializer
@api_view(['POST'])
def create_contract(request):
try:
serializer = ContractSerializer(data=request.data, context={"request": request})
if serializer.is_valid():
contract = serializer.save()
Operation.log(contract, contract.name, 'create', udc=contract.udc, user=request.user)
return Response({'success': True,
"msg": _('Contract is created successfully!')},
status=status.HTTP_201_CREATED)
else:
return Response({"success": False,
"msg": _('Contract data is not valid!'),
'errors': serializer.errors},
status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
LOG.error("Failed to create contract, msg:[%s]" % e)
return Response({"success": False, "msg": _('Failed to create contract for unknown reason.')})
@api_view(['POST'])
def update_contract(request):
try:
pk = request.data['id']
contract = Contract.objects.get(pk=pk)
contract.name = request.data['name']
contract.customer = request.data['customer']
contract.start_date = datetime.strptime(request.data['start_date'], '%Y-%m-%d %H:%M:%S')
contract.end_date = datetime.strptime(request.data['end_date'], '%Y-%m-%d %H:%M:%S')
contract.save()
Operation.log(contract, contract.name, 'update', udc=contract.udc, user=request.user)
        return Response({'success': True, "msg": _('Contract was updated successfully!')},
status=status.HTTP_201_CREATED)
except Exception as e:
LOG.error("Failed to update contract, msg:[%s]" % e)
return Response({"success": False, "msg": _('Failed to update contract for unknown reason.')})
@api_view(['POST'])
def delete_contracts(request):
try:
contract_ids = request.data.getlist('contract_ids[]')
for contract_id in contract_ids:
contract = Contract.objects.get(pk=contract_id)
contract.deleted = True
contract.save()
Quota.living.filter(contract__pk=contract_id).update(deleted=True, update_date=timezone.now())
Operation.log(contract, contract.name, 'delete', udc=contract.udc, user=request.user)
return Response({'success': True, "msg": _('Contracts have been deleted!')}, status=status.HTTP_201_CREATED)
except Exception as e:
LOG.error("Failed to delete contracts, msg:[%s]" % e)
return Response({"success": False, "msg": _('Failed to delete contracts for unknown reason.')})
class UserList(generics.ListAPIView):
queryset = UserProxy.normal_users
serializer_class = UserSerializer
def list(self, request, *args, **kwargs):
serializer = self.serializer_class(self.get_queryset(), many=True)
return Response(serializer.data)
class UserDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
def perform_destroy(self, instance):
instance.is_active = False
instance.save()
@api_view(['POST'])
def deactivate_user(request):
pk = request.data['id']
user = User.objects.get(pk=pk)
user.is_active = False
user.save()
return Response({"success": True, "msg": _('User has been deactivated!')}, status=status.HTTP_200_OK)
@api_view(['POST'])
def activate_user(request):
pk = request.data['id']
user = User.objects.get(pk=pk)
user.is_active = True
user.save()
return Response({"success": True, "msg": _('User has been activated!')}, status=status.HTTP_200_OK)
class QuotaList(generics.ListAPIView):
queryset = Quota.living
serializer_class = QuotaSerializer
def list(self, request, *args, **kwargs):
queryset = self.get_queryset()
if 'contract_id' in request.query_params:
queryset = queryset.filter(contract__id=request.query_params['contract_id'])
return Response(self.serializer_class(queryset, many=True).data)
class QuotaDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Quota.living
serializer_class = QuotaSerializer
@api_view(['GET'])
def resource_options(request):
return Response(QUOTA_ITEM)
@api_view(['POST'])
def create_quotas(request):
try:
contract = Contract.objects.get(pk=request.data['contract_id'])
quota_ids = request.data.getlist('ids[]')
resources = request.data.getlist('resources[]')
limits = request.data.getlist('limits[]')
for index, quota_id in enumerate(quota_ids):
resource, limit = resources[index], limits[index]
if quota_id and Quota.living.filter(contract=contract, pk=quota_id).exists():
Quota.objects.filter(pk=quota_id).update(resource=resource,
limit=limit,
update_date=timezone.now())
else:
Quota.objects.create(resource=resource, limit=limit, contract=contract)
Operation.log(contract, contract.name + " quota", 'update',
udc=contract.udc, user=request.user)
return Response({'success': True,
"msg": _('Quotas have been saved successfully!')},
status=status.HTTP_201_CREATED)
except Exception as e:
LOG.error("Failed to save quotas, msg:[%s]" % e)
return Response({"success": False, "msg": _('Failed to save quotas for unknown reason.')})
@api_view(['POST'])
def create_quota(request):
try:
contract = Contract.objects.get(pk=request.data['contract'])
resource, limit = request.data['resource'], request.data['limit']
pk = request.data['id'] if 'id' in request.data else None
if pk and Quota.objects.filter(pk=pk).exists():
quota = Quota.objects.get(pk=pk)
quota.limit = limit
quota.save()
else:
quota = Quota.objects.create(resource=resource, limit=limit, contract=contract)
return Response({'success': True,
"msg": _('Quota have been saved successfully!'),
"quota": QuotaSerializer(quota).data},
status=status.HTTP_201_CREATED)
except Exception as e:
LOG.error("Failed to save quota, msg:[%s]" % e)
return Response({"success": False, "msg": _('Failed to save quota for unknown reason.')})
@api_view(['POST'])
def delete_quota(request):
try:
Quota.living.filter(pk=request.data['id']).update(deleted=True)
return Response({'success': True,
"msg": _('Quota have been deleted successfully!')},
status=status.HTTP_201_CREATED)
except Exception as e:
LOG.error("Failed to create quota, msg:[%s]" % e)
return Response({"success": False, "msg": _('Failed to create quota for unknown reason.')})
@api_view(["GET"])
def get_config_view(request):
return Response(settings.SITE_CONFIG)
|
{
"content_hash": "739df7200d295d473d5b3e86490beb95",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 116,
"avg_line_length": 33.37883008356546,
"alnum_prop": 0.6362346657765168,
"repo_name": "bluven/eonboard",
"id": "8dca1b265f1553495246f1df2f0c2c5813a44b44",
"size": "12004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eoncloud_web/biz/account/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "881794"
},
{
"name": "HTML",
"bytes": "286877"
},
{
"name": "JavaScript",
"bytes": "424332"
},
{
"name": "Python",
"bytes": "506262"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import io
import os
import os.path
import sys
import stat
import tempfile
import traceback
from collections import namedtuple
from yaml import load as yaml_load
try:
# use C version if possible for speedup
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
from ansible.config.data import ConfigData
from ansible.errors import AnsibleOptionsError, AnsibleError
from ansible.module_utils.six import PY3, string_types
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_text, to_bytes, to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.parsing.quoting import unquote
from ansible.utils import py3compat
from ansible.utils.path import unfrackpath
from ansible.utils.path import makedirs_safe
Plugin = namedtuple('Plugin', 'name type')
Setting = namedtuple('Setting', 'name value origin type')
INTERNAL_DEFS = {'lookup': ('_terms',)}
def _get_entry(plugin_type, plugin_name, config):
''' construct entry for requested config '''
entry = ''
if plugin_type:
entry += 'plugin_type: %s ' % plugin_type
if plugin_name:
entry += 'plugin: %s ' % plugin_name
entry += 'setting: %s ' % config
return entry
# FIXME: see if we can unify in module_utils with similar function used by argspec
def ensure_type(value, value_type, origin=None):
''' return a configuration variable with casting
:arg value: The value to ensure correct typing of
:kwarg value_type: The type of the value. This can be any of the following strings:
:boolean: sets the value to a True or False value
:bool: Same as 'boolean'
        :integer: Sets the value to an integer or raises a ValueError
:int: Same as 'integer'
        :float: Sets the value to a float or raises a ValueError
:list: Treats the value as a comma separated list. Split the value
and return it as a python list.
:none: Sets the value to None
        :path: Expands any environment variables and tildes in the value.
:tmppath: Create a unique temporary directory inside of the directory
specified by value and return its path.
:temppath: Same as 'tmppath'
:tmp: Same as 'tmppath'
        :pathlist: Treat the value as a comma separated list of paths.
            Split the value on commas and then expand each part for
            environment variables and tildes.
        :pathspec: Treat the value as a typical PATH string. (On POSIX,
            this means colon separated strings.) Split the value on the
            path separator and then expand each part for environment
            variables and tildes.
:str: Sets the value to string types.
:string: Same as 'str'
'''
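    # Illustrative casts (inputs are hypothetical; list/bool results pass
    # through the final to_text() untouched):
    #   ensure_type('yes', 'bool')     -> True
    #   ensure_type('1, 2,3', 'list')  -> ['1', '2', '3']
    #   ensure_type('~/cfg', 'path')   -> the expanded, normalized path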
basedir = None
if origin and os.path.isabs(origin) and os.path.exists(to_bytes(origin)):
basedir = origin
if value_type:
value_type = value_type.lower()
if value_type in ('boolean', 'bool'):
value = boolean(value, strict=False)
elif value is not None:
if value_type in ('integer', 'int'):
value = int(value)
elif value_type == 'float':
value = float(value)
elif value_type == 'list':
if isinstance(value, string_types):
value = [x.strip() for x in value.split(',')]
elif value_type == 'none':
if value == "None":
value = None
elif value_type == 'path':
value = resolve_path(value, basedir=basedir)
elif value_type in ('tmp', 'temppath', 'tmppath'):
value = resolve_path(value, basedir=basedir)
if not os.path.exists(value):
makedirs_safe(value, 0o700)
prefix = 'ansible-local-%s' % os.getpid()
value = tempfile.mkdtemp(prefix=prefix, dir=value)
elif value_type == 'pathspec':
if isinstance(value, string_types):
value = value.split(os.pathsep)
value = [resolve_path(x, basedir=basedir) for x in value]
elif value_type == 'pathlist':
if isinstance(value, string_types):
value = value.split(',')
value = [resolve_path(x, basedir=basedir) for x in value]
elif value_type in ('str', 'string'):
value = unquote(to_text(value, errors='surrogate_or_strict'))
# defaults to string type
elif isinstance(value, string_types):
value = unquote(value)
return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
# FIXME: see if this can live in utils/path
def resolve_path(path, basedir=None):
''' resolve relative or 'variable' paths '''
if '{{CWD}}' in path: # allow users to force CWD using 'magic' {{CWD}}
path = path.replace('{{CWD}}', os.getcwd())
return unfrackpath(path, follow=False, basedir=basedir)
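# For instance, resolve_path('{{CWD}}/ansible.cfg') substitutes the magic
# {{CWD}} token with os.getcwd() before normalizing the result.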
# FIXME: generic file type?
def get_config_type(cfile):
ftype = None
if cfile is not None:
ext = os.path.splitext(cfile)[-1]
if ext in ('.ini', '.cfg'):
ftype = 'ini'
elif ext in ('.yaml', '.yml'):
ftype = 'yaml'
else:
raise AnsibleOptionsError("Unsupported configuration file extension for %s: %s" % (cfile, to_native(ext)))
return ftype
# FIXME: can move to module_utils for use for ini plugins also?
def get_ini_config_value(p, entry):
''' returns the value of last ini entry found '''
value = None
if p is not None:
try:
value = p.get(entry.get('section', 'defaults'), entry.get('key', ''), raw=True)
except Exception: # FIXME: actually report issues here
pass
return value
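# e.g. get_ini_config_value(parser, {'section': 'defaults', 'key': 'forks'})
# returns the raw string stored under [defaults] forks, or None if unset
# ('forks' is only an illustrative key here).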
def find_ini_config_file(warnings=None):
    ''' Load the INI config file; search order (first found wins): ENV, CWD, HOME, /etc/ansible '''
# FIXME: eventually deprecate ini configs
if warnings is None:
# Note: In this case, warnings does nothing
warnings = set()
# A value that can never be a valid path so that we can tell if ANSIBLE_CONFIG was set later
# We can't use None because we could set path to None.
    SENTINEL = object()
potential_paths = []
# Environment setting
path_from_env = os.getenv("ANSIBLE_CONFIG", SENTINEL)
if path_from_env is not SENTINEL:
path_from_env = unfrackpath(path_from_env, follow=False)
if os.path.isdir(to_bytes(path_from_env)):
path_from_env = os.path.join(path_from_env, "ansible.cfg")
potential_paths.append(path_from_env)
# Current working directory
warn_cmd_public = False
try:
cwd = os.getcwd()
perms = os.stat(cwd)
cwd_cfg = os.path.join(cwd, "ansible.cfg")
if perms.st_mode & stat.S_IWOTH:
# Working directory is world writable so we'll skip it.
# Still have to look for a file here, though, so that we know if we have to warn
if os.path.exists(cwd_cfg):
warn_cmd_public = True
else:
potential_paths.append(cwd_cfg)
except OSError:
# If we can't access cwd, we'll simply skip it as a possible config source
pass
# Per user location
potential_paths.append(unfrackpath("~/.ansible.cfg", follow=False))
# System location
potential_paths.append("/etc/ansible/ansible.cfg")
for path in potential_paths:
if os.path.exists(to_bytes(path)):
break
else:
path = None
# Emit a warning if all the following are true:
# * We did not use a config from ANSIBLE_CONFIG
# * There's an ansible.cfg in the current working directory that we skipped
if path_from_env != path and warn_cmd_public:
warnings.add(u"Ansible is being run in a world writable directory (%s),"
u" ignoring it as an ansible.cfg source."
u" For more information see"
u" https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir"
% to_text(cwd))
return path
class ConfigManager(object):
DEPRECATED = []
WARNINGS = set()
def __init__(self, conf_file=None, defs_file=None):
self._base_defs = {}
self._plugins = {}
self._parsers = {}
self._config_file = conf_file
self.data = ConfigData()
self._base_defs = self._read_config_yaml_file(defs_file or ('%s/base.yml' % os.path.dirname(__file__)))
if self._config_file is None:
# set config using ini
self._config_file = find_ini_config_file(self.WARNINGS)
# consume configuration
if self._config_file:
if os.path.exists(to_bytes(self._config_file)):
# initialize parser and read config
self._parse_config_file()
# update constants
self.update_config_data()
try:
self.update_module_defaults_groups()
except Exception as e:
# Since this is a 2.7 preview feature, we want to have it fail as gracefully as possible when there are issues.
sys.stderr.write('Could not load module_defaults_groups: %s: %s\n\n' % (type(e).__name__, e))
self.module_defaults_groups = {}
def _read_config_yaml_file(self, yml_file):
# TODO: handle relative paths as relative to the directory containing the current playbook instead of CWD
# Currently this is only used with absolute paths to the `ansible/config` directory
yml_file = to_bytes(yml_file)
if os.path.exists(yml_file):
with open(yml_file, 'rb') as config_def:
return yaml_load(config_def, Loader=SafeLoader) or {}
raise AnsibleError(
"Missing base YAML definition file (bad install?): %s" % to_native(yml_file))
def _parse_config_file(self, cfile=None):
''' return flat configuration settings from file(s) '''
# TODO: take list of files with merge/nomerge
if cfile is None:
cfile = self._config_file
ftype = get_config_type(cfile)
if cfile is not None:
if ftype == 'ini':
self._parsers[cfile] = configparser.ConfigParser()
with open(to_bytes(cfile), 'rb') as f:
try:
cfg_text = to_text(f.read(), errors='surrogate_or_strict')
except UnicodeError as e:
raise AnsibleOptionsError("Error reading config file(%s) because the config file was not utf8 encoded: %s" % (cfile, to_native(e)))
try:
if PY3:
self._parsers[cfile].read_string(cfg_text)
else:
cfg_file = io.StringIO(cfg_text)
self._parsers[cfile].readfp(cfg_file)
except configparser.Error as e:
raise AnsibleOptionsError("Error reading config file (%s): %s" % (cfile, to_native(e)))
# FIXME: this should eventually handle yaml config files
# elif ftype == 'yaml':
# with open(cfile, 'rb') as config_stream:
# self._parsers[cfile] = yaml.safe_load(config_stream)
else:
raise AnsibleOptionsError("Unsupported configuration file type: %s" % to_native(ftype))
def _find_yaml_config_files(self):
''' Load YAML Config Files in order, check merge flags, keep origin of settings'''
pass
def get_plugin_options(self, plugin_type, name, keys=None, variables=None, direct=None):
options = {}
defs = self.get_configuration_definitions(plugin_type, name)
for option in defs:
options[option] = self.get_config_value(option, plugin_type=plugin_type, plugin_name=name, keys=keys, variables=variables, direct=direct)
return options
def get_plugin_vars(self, plugin_type, name):
pvars = []
for pdef in self.get_configuration_definitions(plugin_type, name).values():
if 'vars' in pdef and pdef['vars']:
for var_entry in pdef['vars']:
pvars.append(var_entry['name'])
return pvars
def get_configuration_definition(self, name, plugin_type=None, plugin_name=None):
ret = {}
if plugin_type is None:
ret = self._base_defs.get(name, None)
elif plugin_name is None:
ret = self._plugins.get(plugin_type, {}).get(name, None)
else:
ret = self._plugins.get(plugin_type, {}).get(plugin_name, {}).get(name, None)
return ret
def get_configuration_definitions(self, plugin_type=None, name=None):
        ''' list the possible settings, either the base definitions or those for a specific plugin type or plugin '''
ret = {}
if plugin_type is None:
ret = self._base_defs
elif name is None:
ret = self._plugins.get(plugin_type, {})
else:
ret = self._plugins.get(plugin_type, {}).get(name, {})
return ret
def _loop_entries(self, container, entry_list):
        ''' shared lookup: return the last defined value and its origin from an ordered entry list '''
value = None
origin = None
for entry in entry_list:
name = entry.get('name')
temp_value = container.get(name, None)
if temp_value is not None: # only set if env var is defined
value = temp_value
origin = name
# deal with deprecation of setting source, if used
if 'deprecated' in entry:
self.DEPRECATED.append((entry['name'], entry['deprecated']))
return value, origin
def get_config_value(self, config, cfile=None, plugin_type=None, plugin_name=None, keys=None, variables=None, direct=None):
        ''' thin wrapper around get_config_value_and_origin that returns only the value '''
try:
value, _drop = self.get_config_value_and_origin(config, cfile=cfile, plugin_type=plugin_type, plugin_name=plugin_name,
keys=keys, variables=variables, direct=direct)
except AnsibleError:
raise
except Exception as e:
raise AnsibleError("Unhandled exception when retrieving %s:\n%s" % (config, to_native(e)), orig_exc=e)
return value
def get_config_value_and_origin(self, config, cfile=None, plugin_type=None, plugin_name=None, keys=None, variables=None, direct=None):
''' Given a config key figure out the actual value and report on the origin of the settings '''
if cfile is None:
# use default config
cfile = self._config_file
# Note: sources that are lists listed in low to high precedence (last one wins)
value = None
origin = None
defs = self.get_configuration_definitions(plugin_type, plugin_name)
if config in defs:
# direct setting via plugin arguments, can set to None so we bypass rest of processing/defaults
direct_aliases = []
if direct:
direct_aliases = [direct[alias] for alias in defs[config].get('aliases', []) if alias in direct]
if direct and config in direct:
value = direct[config]
origin = 'Direct'
elif direct and direct_aliases:
value = direct_aliases[0]
origin = 'Direct'
else:
# Use 'variable overrides' if present, highest precedence, but only present when querying running play
if variables and defs[config].get('vars'):
value, origin = self._loop_entries(variables, defs[config]['vars'])
origin = 'var: %s' % origin
# use playbook keywords if you have em
if value is None and keys and config in keys:
value, origin = keys[config], 'keyword'
origin = 'keyword: %s' % origin
# env vars are next precedence
if value is None and defs[config].get('env'):
value, origin = self._loop_entries(py3compat.environ, defs[config]['env'])
origin = 'env: %s' % origin
# try config file entries next, if we have one
if self._parsers.get(cfile, None) is None:
self._parse_config_file(cfile)
if value is None and cfile is not None:
ftype = get_config_type(cfile)
if ftype and defs[config].get(ftype):
if ftype == 'ini':
# load from ini config
try: # FIXME: generalize _loop_entries to allow for files also, most of this code is dupe
for ini_entry in defs[config]['ini']:
temp_value = get_ini_config_value(self._parsers[cfile], ini_entry)
if temp_value is not None:
value = temp_value
origin = cfile
if 'deprecated' in ini_entry:
self.DEPRECATED.append(('[%s]%s' % (ini_entry['section'], ini_entry['key']), ini_entry['deprecated']))
except Exception as e:
sys.stderr.write("Error while loading ini config %s: %s" % (cfile, to_native(e)))
elif ftype == 'yaml':
# FIXME: implement, also , break down key from defs (. notation???)
origin = cfile
# set default if we got here w/o a value
if value is None:
if defs[config].get('required', False):
if not plugin_type or config not in INTERNAL_DEFS.get(plugin_type, {}):
raise AnsibleError("No setting was provided for required configuration %s" %
to_native(_get_entry(plugin_type, plugin_name, config)))
else:
value = defs[config].get('default')
origin = 'default'
# skip typing as this is a templated default that will be resolved later in constants, which has needed vars
if plugin_type is None and isinstance(value, string_types) and (value.startswith('{{') and value.endswith('}}')):
return value, origin
# ensure correct type, can raise exceptions on mismatched types
try:
value = ensure_type(value, defs[config].get('type'), origin=origin)
except ValueError as e:
if origin.startswith('env:') and value == '':
# this is empty env var for non string so we can set to default
origin = 'default'
value = ensure_type(defs[config].get('default'), defs[config].get('type'), origin=origin)
else:
raise AnsibleOptionsError('Invalid type for configuration option %s: %s' %
(to_native(_get_entry(plugin_type, plugin_name, config)), to_native(e)))
# deal with deprecation of the setting
if 'deprecated' in defs[config] and origin != 'default':
self.DEPRECATED.append((config, defs[config].get('deprecated')))
else:
raise AnsibleError('Requested entry (%s) was not defined in configuration.' % to_native(_get_entry(plugin_type, plugin_name, config)))
return value, origin
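    # A minimal usage sketch (assumes a standard install so base.yml is
    # found; 'DEFAULT_FORKS' is an illustrative setting name):
    #
    #   manager = ConfigManager()
    #   value, origin = manager.get_config_value_and_origin('DEFAULT_FORKS')
    #   # origin is one of 'Direct', 'var: ...', 'keyword: ...', 'env: ...',
    #   # the config file path, or 'default'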
def initialize_plugin_configuration_definitions(self, plugin_type, name, defs):
if plugin_type not in self._plugins:
self._plugins[plugin_type] = {}
self._plugins[plugin_type][name] = defs
def update_module_defaults_groups(self):
defaults_config = self._read_config_yaml_file(
'%s/module_defaults.yml' % os.path.join(os.path.dirname(__file__))
)
if defaults_config.get('version') not in ('1', '1.0', 1, 1.0):
raise AnsibleError('module_defaults.yml has an invalid version "%s" for configuration. Could be a bad install.' % defaults_config.get('version'))
self.module_defaults_groups = defaults_config.get('groupings', {})
def update_config_data(self, defs=None, configfile=None):
''' really: update constants '''
if defs is None:
defs = self._base_defs
if configfile is None:
configfile = self._config_file
if not isinstance(defs, dict):
raise AnsibleOptionsError("Invalid configuration definition type: %s for %s" % (type(defs), defs))
# update the constant for config file
self.data.update_setting(Setting('CONFIG_FILE', configfile, '', 'string'))
origin = None
# env and config defs can have several entries, ordered in list from lowest to highest precedence
for config in defs:
if not isinstance(defs[config], dict):
raise AnsibleOptionsError("Invalid configuration definition '%s': type is %s" % (to_native(config), type(defs[config])))
# get value and origin
try:
value, origin = self.get_config_value_and_origin(config, configfile)
except Exception as e:
# Printing the problem here because, in the current code:
# (1) we can't reach the error handler for AnsibleError before we
# hit a different error due to lack of working config.
# (2) We don't have access to display yet because display depends on config
# being properly loaded.
#
# If we start getting double errors printed from this section of code, then the
# above problem #1 has been fixed. Revamp this to be more like the try: except
# in get_config_value() at that time.
sys.stderr.write("Unhandled error:\n %s\n\n" % traceback.format_exc())
raise AnsibleError("Invalid settings supplied for %s: %s\n" % (config, to_native(e)), orig_exc=e)
# set the constant
self.data.update_setting(Setting(config, value, origin, defs[config].get('type', 'string')))
|
{
"content_hash": "91084a2e46563d6e4d3001ca3a9e217e",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 157,
"avg_line_length": 42.265917602996254,
"alnum_prop": 0.582100132919805,
"repo_name": "SergeyCherepanov/ansible",
"id": "ec3fc8e60143e24c625b73b5c5721c909a4f3378",
"size": "22703",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "ansible/ansible/config/manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
import re
from cradmin_legacy import crinstance
from devilry.apps.core.models import Assignment
from devilry.devilry_examiner.cradminextensions import devilry_crmenu_examiner
from devilry.devilry_cradmin import devilry_crinstance
from devilry.devilry_examiner.views.assignment import grouplist
from devilry.devilry_examiner.views.assignment.download_files import download_archive
from devilry.devilry_examiner.views.assignment.bulkoperations import bulk_manage_deadline
class Menu(devilry_crmenu_examiner.Menu):
def build_menu(self):
super(Menu, self).build_menu()
assignment = self.request.cradmin_role
self.add_role_menuitem_object()
self.add_assignment_breadcrumb_item(assignment=assignment, active=True)
class CrAdminInstance(devilry_crinstance.BaseCrInstanceExaminer):
menuclass = Menu
roleclass = Assignment
apps = [
('grouplist', grouplist.App),
('download', download_archive.App),
('manage-deadlines', bulk_manage_deadline.App)
]
id = 'devilry_examiner_assignment'
rolefrontpage_appname = 'grouplist'
flatten_rolefrontpage_url = True
def get_rolequeryset(self):
return Assignment.objects\
.filter_examiner_has_access(self.request.user)\
.prefetch_point_to_grade_map()\
.distinct()
def get_titletext_for_role(self, role):
"""
Get a short title briefly describing the given ``role``.
        Remember that the role is an Assignment.
"""
assignment = role
return assignment.get_path()
@property
def assignment(self):
return self.request.cradmin_role
@classmethod
def matches_urlpath(cls, urlpath):
return re.match('^/devilry_examiner/assignment/.*$', urlpath)
|
{
"content_hash": "e6068909f2253cdaf5fa4b7b24f02beb",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 89,
"avg_line_length": 33.58490566037736,
"alnum_prop": 0.700561797752809,
"repo_name": "devilry/devilry-django",
"id": "7d627ce41617e86436864d3db440aaa06bcfb7c6",
"size": "1780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devilry/devilry_examiner/views/assignment/crinstance_assignment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "513510"
},
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "421969"
},
{
"name": "JavaScript",
"bytes": "756713"
},
{
"name": "Less",
"bytes": "166670"
},
{
"name": "PLpgSQL",
"bytes": "397986"
},
{
"name": "Python",
"bytes": "6507968"
},
{
"name": "Shell",
"bytes": "10328"
}
],
"symlink_target": ""
}
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
]
)
class MultiNodeTest(unittest.TestCase):
def setUp(self):
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
encoding="utf-8",
check=True,
)
assert hasattr(self, "env")
def create_estimator(self, instance_count):
job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script,
source_dir=self.env.test_path,
role=self.env.role,
image_uri=self.env.image_uri,
base_job_name=job_name,
instance_count=instance_count,
instance_type=self.instance_type,
debugger_hook_config=False,
hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
metric_definitions=self.env.metric_definitions,
distribution=distribution,
py_version="py36",
)
def save_results_as_csv(self, job_name):
TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
# @parameterized.expand([(2,), (4,),])
@parameterized.expand([(2,)])
def test_script(self, instance_count):
# create estimator
estimator = self.create_estimator(instance_count)
# run training
estimator.fit()
# result dataframe
result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from the SageMaker job; this includes starting, preprocessing, and stopping
train_runtime = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
|
{
"content_hash": "4b4b92c38c5051e80cb896063b3a388c",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 118,
"avg_line_length": 38.67272727272727,
"alnum_prop": 0.6114245416078985,
"repo_name": "huggingface/transformers",
"id": "8fb60d64a61f8cf5e5123795057cc0eed52ec69a",
"size": "4254",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/sagemaker/test_multi_node_data_parallel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
}
|
"""Parallel workflow execution via Condor DAGMan
"""
import os
import sys
import uuid
import time
from warnings import warn
from .base import (GraphPluginBase, logger)
from ...interfaces.base import CommandLine
class CondorDAGManPlugin(GraphPluginBase):
"""Execute using Condor DAGMan
The plugin_args input to run can be used to control the DAGMan execution.
The value of most arguments can be a literal string or a filename, where in
the latter case the content of the file will be used as the argument value.
Currently supported options are:
- submit_template : submit spec template for individual jobs in a DAG (see
      CondorDAGManPlugin.default_submit_template for the default).
- initial_specs : additional submit specs that are prepended to any job's
submit file
- override_specs : additional submit specs that are appended to any job's
submit file
- wrapper_cmd : path to an executable that will be started instead of a node
      script. This is useful for wrapper scripts that execute certain
      functionality before or after a node runs. If this option is
      given, the wrapper command is called with the respective Python
executable and the path to the node script as final arguments
- wrapper_args : optional additional arguments to a wrapper command
- dagman_args : arguments to be prepended to the arguments of the
condor_submit_dag call
- block : if True the plugin call will block until Condor has finished
processing the entire workflow (default: False)
"""
default_submit_template = """
universe = vanilla
notification = Never
executable = %(executable)s
arguments = %(nodescript)s
output = %(basename)s.out
error = %(basename)s.err
log = %(basename)s.log
getenv = True
"""
    def _get_str_or_file(self, arg):
        if os.path.isfile(arg):
            with open(arg) as farg:
                content = farg.read()
        else:
            content = arg
        return content
# XXX feature wishlist
# - infer data file dependencies from jobs
# - infer CPU requirements from jobs
# - infer memory requirements from jobs
# - looks like right now all jobs come in here, regardless of whether they
# actually have to run. would be good to be able to decide whether they
# actually have to be scheduled (i.e. output already exist).
def __init__(self, **kwargs):
for var, id_, val in \
(('_template', 'submit_template', self.default_submit_template),
('_initial_specs', 'template', ''),
('_initial_specs', 'initial_specs', ''),
('_override_specs', 'submit_specs', ''),
('_override_specs', 'override_specs', ''),
('_wrapper_cmd', 'wrapper_cmd', None),
('_wrapper_args', 'wrapper_args', ''),
('_block', 'block', False),
('_dagman_args', 'dagman_args', '')):
if 'plugin_args' in kwargs \
and not kwargs['plugin_args'] is None \
and id_ in kwargs['plugin_args']:
if id_ == 'wrapper_cmd':
val = os.path.abspath(kwargs['plugin_args'][id_])
elif id_ == 'block':
val = kwargs['plugin_args'][id_]
else:
val = self._get_str_or_file(kwargs['plugin_args'][id_])
setattr(self, var, val)
# TODO remove after some time
if 'plugin_args' in kwargs \
and not kwargs['plugin_args'] is None:
plugin_args = kwargs['plugin_args']
if 'template' in plugin_args:
warn("the 'template' argument is deprecated, use 'initial_specs' instead")
if 'submit_specs' in plugin_args:
warn("the 'submit_specs' argument is deprecated, use 'override_specs' instead")
super(CondorDAGManPlugin, self).__init__(**kwargs)
def _submit_graph(self, pyfiles, dependencies, nodes):
# location of all scripts, place dagman output in here too
batch_dir, _ = os.path.split(pyfiles[0])
# DAG description filename
dagfilename = os.path.join(batch_dir, 'workflow-%s.dag' % uuid.uuid4())
with open(dagfilename, 'wt') as dagfileptr:
# loop over all scripts, create submit files, and define them
# as jobs in the DAG
for idx, pyscript in enumerate(pyfiles):
node = nodes[idx]
# XXX redundant with previous value? or could it change between
# scripts?
template, initial_specs, override_specs, wrapper_cmd, wrapper_args = \
self._get_args(node,
["template", "initial_specs",
"override_specs", "wrapper_cmd",
"wrapper_args"])
# add required slots to the template
template = '%s\n%s\n%s\nqueue\n' % (
'%(initial_specs)s',
template,
'%(override_specs)s')
batch_dir, name = os.path.split(pyscript)
name = '.'.join(name.split('.')[:-1])
specs = dict(
# TODO make parameter for this,
initial_specs=initial_specs,
executable=sys.executable,
nodescript=pyscript,
basename=os.path.join(batch_dir, name),
override_specs=override_specs
)
if wrapper_cmd is not None:
specs['executable'] = wrapper_cmd
specs['nodescript'] = \
'%s %s %s' % (wrapper_args % specs, # give access to variables
sys.executable,
pyscript)
submitspec = template % specs
# write submit spec for this job
submitfile = os.path.join(batch_dir,
'%s.submit' % name)
                with open(submitfile, 'wt') as submitfileprt:
                    submitfileprt.write(submitspec)
# define job in DAG
dagfileptr.write('JOB %i %s\n' % (idx, submitfile))
# define dependencies in DAG
for child in dependencies:
parents = dependencies[child]
if len(parents):
dagfileptr.write('PARENT %s CHILD %i\n'
% (' '.join([str(i) for i in parents]),
child))
# hand over DAG to condor_dagman
cmd = CommandLine('condor_submit_dag', environ=os.environ.data,
terminal_output='allatonce')
# needs -update_submit or re-running a workflow will fail
cmd.inputs.args = '%s -update_submit %s' % (self._dagman_args,
dagfilename)
cmd.run()
logger.info('submitted all jobs to Condor DAGMan')
if self._block:
            # wait for DAGMan to settle down; no time is wasted, it is already running
time.sleep(10)
if not os.path.exists('%s.condor.sub' % dagfilename):
raise EnvironmentError("DAGMan did not create its submit file, please check the logs")
# wait for completion
logger.info('waiting for DAGMan to finish')
lockfilename = '%s.lock' % dagfilename
while os.path.exists(lockfilename):
time.sleep(5)
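# The DAG description written above uses plain DAGMan syntax, for example
# (paths are hypothetical):
#   JOB 0 /tmp/batch/node0.submit
#   JOB 1 /tmp/batch/node1.submit
#   PARENT 0 CHILD 1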
|
{
"content_hash": "eeaa5a5ce5be120481f0b3f87ece2437",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 102,
"avg_line_length": 46.05917159763314,
"alnum_prop": 0.545606372045221,
"repo_name": "iglpdc/nipype",
"id": "4f89a13ce684cbb250b5a007e6891863162fc031",
"size": "7784",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nipype/pipeline/plugins/dagman.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2106"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "4458175"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dictionary', '0002_word_category'),
]
operations = [
migrations.AddField(
model_name='word',
name='times_practiced',
field=models.PositiveIntegerField(default=1),
preserve_default=True,
),
]
|
{
"content_hash": "3d330dfb01145d3760d9a7e4da2e7577",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 57,
"avg_line_length": 22.263157894736842,
"alnum_prop": 0.5981087470449172,
"repo_name": "burun/FinnDict-sqlite",
"id": "fc1d495ae40329aabf9ff769d6b34c41a134fe28",
"size": "447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dictionary/migrations/0003_word_times_practiced.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6043"
},
{
"name": "Python",
"bytes": "10026"
}
],
"symlink_target": ""
}
|
from django.views import i18n
from django.conf.urls import url
from django.urls import path
from django.contrib import admin
from django.contrib.auth.views import LoginView
import mittab.apps.tab.views as views
import mittab.apps.tab.judge_views as judge_views
import mittab.apps.tab.team_views as team_views
import mittab.apps.tab.debater_views as debater_views
import mittab.apps.tab.pairing_views as pairing_views
import mittab.apps.tab.outround_pairing_views as outround_pairing_views
admin.autodiscover()
urlpatterns = [
url(r"^admin/logout/$", views.tab_logout, name="admin_logout"),
url(r"^accounts/logout/$", views.tab_logout, name="logout"),
url(r"^admin/", admin.site.urls, name="admin"),
url(r"^dynamic-media/jsi18n/$", i18n.JavaScriptCatalog.as_view(), name="js18"),
url(r"^$", views.index, name="index"),
url(r"^403/", views.render_403, name="403"),
url(r"^404/", views.render_404, name="404"),
url(r"^500/", views.render_500, name="500"),
# Account related
url(r"^accounts/login/$",
LoginView.as_view(template_name="registration/login.html"),
name="tab_login"),
# Judge related
url(r"^judges/", judge_views.public_view_judges, name="public_judges"),
url(r"^judge/(\d+)/$", judge_views.view_judge, name="view_judge"),
url(r"^judge/(\d+)/scratches/add/(\d+)/",
judge_views.add_scratches,
name="add_scratches"),
url(r"^judge/(\d+)/scratches/view/",
judge_views.view_scratches,
name="view_scratches"),
url(r"^judge/(\d+)/check_ins/round/(\d+)/$",
judge_views.judge_check_in,
name="judge_check_in"),
url(r"^view_judges/$", judge_views.view_judges, name="view_judges"),
url(r"^enter_judge/$", judge_views.enter_judge, name="enter_judge"),
url(r"^batch_checkin/$", judge_views.batch_checkin, name="batch_checkin"),
# School related
url(r"^school/(\d+)/$", views.view_school, name="view_school"),
url(r"^school/(\d+)/delete/$", views.delete_school, name="delete_school"),
url(r"^view_schools/$", views.view_schools, name="view_schools"),
url(r"^enter_school/$", views.enter_school, name="enter_school"),
# Room related
url(r"^room/(\d+)/$", views.view_room, name="view_room"),
url(r"^view_rooms/$", views.view_rooms, name="view_rooms"),
url(r"^enter_room/$", views.enter_room, name="enter_room"),
url(r"^room/(\d+)/check_ins/round/(\d+)/$",
views.room_check_in,
name="room_check_in"),
url(r"^batch_room_checkin/$", views.batch_checkin, name="batch_room_checkin"),
# Scratch related
url(r"^judge/(\d+)/scratches/delete/(\d+)/",
views.delete_scratch,
name="delete_scratch_judge"),
url(r"^team/(\d+)/scratches/delete/(\d+)/",
views.delete_scratch,
name="delete_scratch_team"),
url(r"^scratches/view/", views.view_scratches, name="view_scratches"),
url(r"^enter_scratch/", views.add_scratch, name="add_scratch"),
# Team related
url(r"^teams/", team_views.public_view_teams, name="public_teams"),
url(r"^team/(\d+)/$", team_views.view_team, name="view_team"),
url(r"^team/(\d+)/scratches/add/(\d+)/",
team_views.add_scratches,
name="add_scratches"),
url(r"^team/(\d+)/scratches/view/",
team_views.view_scratches,
name="view_scratches_team"),
url(r"^team/(\d+)/stats/", team_views.team_stats, name="team_stats"),
url(r"^view_teams/$", team_views.view_teams, name="view_teams"),
url(r"^enter_team/$", team_views.enter_team, name="enter_team"),
url(r"^all_tab_cards/$", team_views.all_tab_cards, name="all_tab_cards"),
url(r"^team/card/(\d+)/$", team_views.tab_card, name="tab_card"),
url(r"^team/card/(\d+)/pretty/$",
team_views.pretty_tab_card,
name="pretty_tab_card"),
url(r"^team/ranking/$", team_views.rank_teams_ajax,
name="rank_teams_ajax"),
url(r"^team/rank/$", team_views.rank_teams, name="rank_teams"),
# Debater related
url(r"^debater/(\d+)/$", debater_views.view_debater, name="view_debater"),
url(r"^view_debaters/$", debater_views.view_debaters,
name="view_debaters"),
url(r"^enter_debater/$", debater_views.enter_debater,
name="enter_debater"),
url(r"^debater/ranking/$",
debater_views.rank_debaters_ajax,
name="rank_debaters_ajax"),
url(r"^debater/rank/$", debater_views.rank_debaters, name="rank_debaters"),
# Pairing related
url(r"^pairings/status/$", pairing_views.view_status, name="view_status"),
url(r"^pairings/view_rounds/$",
pairing_views.view_rounds,
name="view_rounds"),
url(r"^round/(\d+)/$", pairing_views.view_round, name="view_round"),
url(r"^round/(\d+)/result/$",
pairing_views.enter_result,
name="enter_result"),
url(r"^round/(\d+)/result/(\d+)/$",
pairing_views.enter_multiple_results,
name="enter_multiple_results"),
url(r"^round/(\d+)/alternative_judges/(\d+)/$",
pairing_views.alternative_judges,
name="round_alternative_judges"),
url(r"^round/(\d+)/(\d+)/alternative_teams/(gov|opp)/$",
pairing_views.alternative_teams,
name="round_alternative_teams"),
url(r"^round/(\d+)/alternative_judges/$",
pairing_views.alternative_judges,
name="alternative_judges"),
url(r"^round/(\d+)/assign_judge/(\d+)/$",
pairing_views.assign_judge,
name="assign_judge"),
url(r"^pairings/assign_team/(\d+)/(gov|opp)/(\d+)/$",
pairing_views.assign_team,
name="assign_team"),
url(r"^round/(\d+)/assign_judge/(\d+)/(\d+)/$",
pairing_views.assign_judge,
name="swap_judge"),
url(r"^pairing/pair_round/$", pairing_views.pair_round, name="pair_round"),
url(r"^pairing/assign_judges/$",
pairing_views.assign_judges_to_pairing,
name="assign_judges"),
url(r"^pairing/confirm_start_tourny/$",
pairing_views.confirm_start_new_tourny,
name="confirm_start_tourny"),
url(r"^pairing/start_tourny/$",
pairing_views.start_new_tourny,
name="start_tourny"),
url(r"^pairings/pairinglist/$",
pairing_views.pretty_pair,
name="pretty_pair"),
url(r"^pairings/missing_ballots/$",
pairing_views.missing_ballots,
name="missing_ballots"),
url(r"^pairings/pairinglist/printable/$",
pairing_views.pretty_pair_print,
name="pretty_pair_print"),
url(r"^pairing/backup/$",
pairing_views.manual_backup,
name="manual_backup"),
url(r"^pairing/release/$",
pairing_views.toggle_pairing_released,
name="toggle_pairing_released"),
url(r"^pairing/view_backups/$",
pairing_views.view_backups,
name="view_backups"),
url(r"^e_ballots/$", pairing_views.e_ballot_search,
name="e_ballot_search"),
url(r"e_ballots/(\S+)/$",
pairing_views.enter_e_ballot,
name="enter_e_ballot"),
# Outround related
url(r"break/",
outround_pairing_views.break_teams,
name="break"),
path("outround_pairing/<int:type_of_round>/<int:num_teams>",
outround_pairing_views.outround_pairing_view,
name="outround_pairing_view"),
path("outround_pairing",
outround_pairing_views.outround_pairing_view,
name="outround_pairing_view_default"),
url(r"^outround/(\d+)/alternative_judges/(\d+)/$",
outround_pairing_views.alternative_judges,
name="outround_alternative_judges"),
url(r"^outround/(\d+)/(\d+)/alternative_teams/(gov|opp)/$",
outround_pairing_views.alternative_teams,
name="outround_alternative_teams"),
url(r"^outround/(\d+)/alternative_judges/$",
outround_pairing_views.alternative_judges,
name="outround_alternative_judges"),
url(r"^outround/(\d+)/assign_judge/(\d+)/$",
outround_pairing_views.assign_judge,
name="outround_assign_judge"),
url(r"^outround/pairings/assign_team/(\d+)/(gov|opp)/(\d+)/$",
outround_pairing_views.assign_team,
name="outround_assign_team"),
url(r"^outround/(\d+)/assign_judge/(\d+)/(\d+)/$",
outround_pairing_views.assign_judge,
name="outround_swap_judge"),
url(r"^outround/(\d+)/result/$",
outround_pairing_views.enter_result,
name="enter_result"),
path("outround_pairing/pair/<int:type_of_round>/<int:num_teams>/",
outround_pairing_views.pair_next_outround,
name="next_outround"),
path("outround_pairings/pairinglist/<int:type_of_round>/",
outround_pairing_views.pretty_pair,
name="outround_pretty_pair"),
path("outround_pairings/pairinglist/printable/<int:type_of_round>/",
outround_pairing_views.pretty_pair_print,
name="outround_pretty_pair_print"),
path("outround_pairing/release/<int:num_teams>/<int:type_of_round>/",
outround_pairing_views.toggle_pairing_released,
name="toggle_outround_pairing_released"),
path("outround_result/<int:type_of_round>",
outround_pairing_views.forum_view,
name="forum_view"),
path("outround_choice/<int:outround_id>",
outround_pairing_views.update_choice,
name="update_choice"),
# Settings related
url(r"^settings_form",
views.settings_form,
name="settings_form"),
# Backups
url(r"^backup/restore/(.+)/$",
pairing_views.restore_backup,
name="restore_backup"),
url(r"^backup/download/(.+)/$",
pairing_views.download_backup,
name="download_backup"),
url(r"^backup/(.+)/$", pairing_views.view_backup, name="view_backup"),
url(r"^upload_backup/$", pairing_views.upload_backup,
name="upload_backup"),
# Data Upload
url(r"^import_data/$", views.upload_data, name="upload_data"),
# Tournament Archive
url(r"^archive/download/$", views.generate_archive, name="download_archive"),
# Cache related
url(r"^cache_refresh", views.force_cache_refresh, name="cache_refresh"),
]
handler403 = "mittab.apps.tab.views.render_403"
handler404 = "mittab.apps.tab.views.render_404"
handler500 = "mittab.apps.tab.views.render_500"
|
{
"content_hash": "5a7d50ff4d981be38f711ddb23f70c09",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 83,
"avg_line_length": 41.15725806451613,
"alnum_prop": 0.6247673165474674,
"repo_name": "jolynch/mit-tab",
"id": "03b0f9b5186972942c1c630faa6c5e6f1c0998e9",
"size": "10207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mittab/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17375"
},
{
"name": "HTML",
"bytes": "59858"
},
{
"name": "JavaScript",
"bytes": "13569"
},
{
"name": "Makefile",
"bytes": "344"
},
{
"name": "Python",
"bytes": "262840"
},
{
"name": "Shell",
"bytes": "1469"
}
],
"symlink_target": ""
}
|
"""nova HACKING file compliance testing
built on top of pep8.py
"""
import fnmatch
import inspect
import logging
import os
import re
import subprocess
import sys
import tokenize
import warnings
import pep8
# Don't need this for testing
logging.disable('LOG')
#N1xx comments
#N2xx except
#N3xx imports
#N4xx docstrings
#N5xx dictionaries/lists
#N6xx calling methods
#N7xx localization
#N8xx git commit messages
IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate', 'nova.db.sqlalchemy.session']
DOCSTRING_TRIPLE = ['"""', "'''"]
VERBOSE_MISSING_IMPORT = os.getenv('HACKING_VERBOSE_MISSING_IMPORT', 'False')
# Monkey patch broken excluded filter in pep8
# See https://github.com/jcrocholl/pep8/pull/111
def excluded(self, filename):
"""
Check if options.exclude contains a pattern that matches filename.
"""
basename = os.path.basename(filename)
return any((pep8.filename_match(filename, self.options.exclude,
default=False),
pep8.filename_match(basename, self.options.exclude,
default=False)))
def input_dir(self, dirname):
"""Check all files in this directory and all subdirectories."""
dirname = dirname.rstrip('/')
if self.excluded(dirname):
return 0
counters = self.options.report.counters
verbose = self.options.verbose
filepatterns = self.options.filename
runner = self.runner
for root, dirs, files in os.walk(dirname):
if verbose:
print('directory ' + root)
counters['directories'] += 1
for subdir in sorted(dirs):
if self.excluded(os.path.join(root, subdir)):
dirs.remove(subdir)
for filename in sorted(files):
                # does the filename match a pattern and is it not excluded?
if ((pep8.filename_match(filename, filepatterns) and
not self.excluded(filename))):
runner(os.path.join(root, filename))
def is_import_exception(mod):
return (mod in IMPORT_EXCEPTIONS or
any(mod.startswith(m + '.') for m in IMPORT_EXCEPTIONS))
def import_normalize(line):
# convert "from x import y" to "import x.y"
# handle "from x import y as z" to "import x.y as z"
split_line = line.split()
if ("import" in line and line.startswith("from ") and "," not in line and
split_line[2] == "import" and split_line[3] != "*" and
split_line[1] != "__future__" and
(len(split_line) == 4 or
(len(split_line) == 6 and split_line[4] == "as"))):
return "import %s.%s" % (split_line[1], split_line[3])
else:
return line
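# Illustrative transformations (inputs are hypothetical):
#   import_normalize("from os import path")       -> "import os.path"
#   import_normalize("from os import path as p")  -> "import os.path"
#   import_normalize("import os")                 -> "import os"  (unchanged)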
def nova_todo_format(physical_line):
"""Check for 'TODO()'.
nova HACKING guide recommendation for TODO:
Include your name with TODOs as in "#TODO(termie)"
N101
"""
pos = physical_line.find('TODO')
pos1 = physical_line.find('TODO(')
pos2 = physical_line.find('#') # make sure it's a comment
if (pos != pos1 and pos2 >= 0 and pos2 < pos):
return pos, "NOVA N101: Use TODO(NAME)"
def nova_except_format(logical_line):
"""Check for 'except:'.
nova HACKING guide recommends not using except:
Do not write "except:", use "except Exception:" at the very least
N201
"""
if logical_line.startswith("except:"):
yield 6, "NOVA N201: no 'except:' at least use 'except Exception:'"
def nova_except_format_assert(logical_line):
"""Check for 'assertRaises(Exception'.
nova HACKING guide recommends not using assertRaises(Exception...):
Do not use overly broad Exception type
N202
"""
if logical_line.startswith("self.assertRaises(Exception"):
yield 1, "NOVA N202: assertRaises Exception too broad"
def nova_one_import_per_line(logical_line):
"""Check for import format.
nova HACKING guide recommends one import per line:
Do not import more than one module per line
Examples:
BAD: from nova.rpc.common import RemoteError, LOG
N301
"""
pos = logical_line.find(',')
parts = logical_line.split()
if (pos > -1 and (parts[0] == "import" or
parts[0] == "from" and parts[2] == "import") and
not is_import_exception(parts[1])):
yield pos, "NOVA N301: one import per line"
_missingImport = set([])
def nova_import_module_only(logical_line):
"""Check for import module only.
nova HACKING guide recommends importing only modules:
Do not import objects, only modules
N302 import only modules
N303 Invalid Import
N304 Relative Import
"""
def importModuleCheck(mod, parent=None, added=False):
"""
        If we can't find the module on the first try, recursively check
        for relative imports.
"""
current_path = os.path.dirname(pep8.current_file)
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
valid = True
if parent:
if is_import_exception(parent):
return
parent_mod = __import__(parent, globals(), locals(),
[mod], -1)
valid = inspect.ismodule(getattr(parent_mod, mod))
else:
__import__(mod, globals(), locals(), [], -1)
valid = inspect.ismodule(sys.modules[mod])
if not valid:
if added:
sys.path.pop()
added = False
return logical_line.find(mod), ("NOVA N304: No "
"relative imports. '%s' is a relative import"
% logical_line)
return logical_line.find(mod), ("NOVA N302: import only "
"modules. '%s' does not import a module"
% logical_line)
except (ImportError, NameError) as exc:
if not added:
added = True
sys.path.append(current_path)
return importModuleCheck(mod, parent, added)
else:
name = logical_line.split()[1]
if name not in _missingImport:
if VERBOSE_MISSING_IMPORT != 'False':
print >> sys.stderr, ("ERROR: import '%s' in %s "
"failed: %s" %
(name, pep8.current_file, exc))
_missingImport.add(name)
added = False
sys.path.pop()
return
except AttributeError:
# Invalid import
return logical_line.find(mod), ("NOVA N303: Invalid import, "
"AttributeError raised")
# convert "from x import y" to " import x.y"
# convert "from x import y as z" to " import x.y"
import_normalize(logical_line)
split_line = logical_line.split()
if (logical_line.startswith("import ") and "," not in logical_line and
(len(split_line) == 2 or
(len(split_line) == 4 and split_line[2] == "as"))):
mod = split_line[1]
rval = importModuleCheck(mod)
if rval is not None:
yield rval
# TODO(jogo) handle "from x import *"
#TODO(jogo): import template: N305
def nova_import_alphabetical(logical_line, line_number, lines):
"""Check for imports in alphabetical order.
nova HACKING guide recommendation for imports:
imports in human alphabetical order
N306
"""
# handle import x
# use .lower since capitalization shouldn't dictate order
split_line = import_normalize(logical_line.strip()).lower().split()
split_previous = import_normalize(lines[line_number - 2]
).strip().lower().split()
# with or without "as y"
length = [2, 4]
if (len(split_line) in length and len(split_previous) in length and
split_line[0] == "import" and split_previous[0] == "import"):
if split_line[1] < split_previous[1]:
yield (0, "NOVA N306: imports not in alphabetical order (%s, %s)"
% (split_previous[1], split_line[1]))
def nova_import_no_db_in_virt(logical_line, filename):
if ("nova/virt" in filename and
not filename.endswith("fake.py") and
"nova import db" in logical_line):
yield (0, "NOVA N307: nova.db import not allowed in nova/virt/*")
def nova_docstring_start_space(physical_line, previous_logical):
"""Check for docstring not start with space.
nova HACKING guide recommendation for docstring:
Docstring should not start with space
N401
"""
# it's important that we determine this is actually a docstring,
# and not a doc block used somewhere after the first line of a
# function def
if (previous_logical.startswith("def ") or
previous_logical.startswith("class ")):
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE])
if (pos != -1 and len(physical_line) > pos + 4):
if (physical_line[pos + 3] == ' '):
return (pos, "NOVA N401: docstring should not start with"
" a space")
def nova_docstring_one_line(physical_line):
"""Check one line docstring end.
nova HACKING guide recommendation for one line docstring:
A one line docstring looks like this and ends in a period.
N402
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
if (pos != -1 and end and len(physical_line) > pos + 4):
if (physical_line[-5] != '.'):
return pos, "NOVA N402: one line docstring needs a period"
def nova_docstring_multiline_end(physical_line):
"""Check multi line docstring end.
nova HACKING guide recommendation for docstring:
Docstring should end on a new line
N403
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
    # a line that ends with a closing triple quote but carries other text
    # means the docstring does not end on its own line
    if (pos != -1 and len(physical_line) == pos + 4):
        if (physical_line.strip() not in DOCSTRING_TRIPLE):
            return (pos, "NOVA N403: multi line docstring end on new line")
FORMAT_RE = re.compile("%(?:"
"%|" # Ignore plain percents
"(\(\w+\))?" # mapping key
"([#0 +-]?" # flag
"(?:\d+|\*)?" # width
"(?:\.\d+)?" # precision
"[hlL]?" # length mod
"\w))") # type
class LocalizationError(Exception):
pass
def check_i18n():
"""Generator that checks token stream for localization errors.
Expects tokens to be ``send``ed one by one.
Raises LocalizationError if some error is found.
"""
while True:
try:
token_type, text, _, _, line = yield
except GeneratorExit:
return
if (token_type == tokenize.NAME and text == "_" and
not line.startswith('def _(msg):')):
while True:
token_type, text, start, _, _ = yield
if token_type != tokenize.NL:
break
if token_type != tokenize.OP or text != "(":
continue # not a localization call
format_string = ''
while True:
token_type, text, start, _, _ = yield
if token_type == tokenize.STRING:
format_string += eval(text)
elif token_type == tokenize.NL:
pass
else:
break
if not format_string:
raise LocalizationError(start,
"NOVA N701: Empty localization string")
if token_type != tokenize.OP:
raise LocalizationError(start,
"NOVA N701: Invalid localization call")
if text != ")":
if text == "%":
raise LocalizationError(start,
"NOVA N702: Formatting operation should be outside"
" of localization method call")
elif text == "+":
raise LocalizationError(start,
"NOVA N702: Use bare string concatenation instead"
" of +")
else:
raise LocalizationError(start,
"NOVA N702: Argument to _ must be just a string")
format_specs = FORMAT_RE.findall(format_string)
positional_specs = [(key, spec) for key, spec in format_specs
if not key and spec]
# not spec means %%, key means %(smth)s
if len(positional_specs) > 1:
raise LocalizationError(start,
"NOVA N703: Multiple positional placeholders")
def nova_localization_strings(logical_line, tokens):
"""Check localization in line.
N701: bad localization call
N702: complex expression instead of string as argument to _()
N703: multiple positional placeholders
"""
gen = check_i18n()
next(gen)
try:
map(gen.send, tokens)
gen.close()
except LocalizationError as e:
yield e.args
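# Illustrative examples (not from the original tool) of what these checks flag:
#   _("hello %s" % name)     -> N702 (formatting inside the _() call)
#   _("hello " + name)       -> N702 (concatenation inside the _() call)
#   _("%s and %s") % (a, b)  -> N703 (two positional placeholders)
# while _("hello %s") % name passes.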
#TODO(jogo) Dict and list objects
current_file = ""
def readlines(filename):
"""Record the current file being tested."""
pep8.current_file = filename
return open(filename).readlines()
def add_nova():
"""Monkey patch in nova guidelines.
Look for functions that start with nova_ and have arguments
and add them to pep8 module
Assumes you know how to write pep8.py checks
"""
for name, function in globals().items():
if not inspect.isfunction(function):
continue
args = inspect.getargspec(function)[0]
if args and name.startswith("nova"):
exec("pep8.%s = %s" % (name, name))
def once_git_check_commit_title():
"""Check git commit messages.
nova HACKING recommends not referencing a bug or blueprint in first line,
it should provide an accurate description of the change
N801
    N802 Title limited to 72 chars
"""
#Get title of most recent commit
subp = subprocess.Popen(['git', 'log', '--no-merges', '--pretty=%s', '-1'],
stdout=subprocess.PIPE)
title = subp.communicate()[0]
if subp.returncode:
raise Exception("git log failed with code %s" % subp.returncode)
#From https://github.com/openstack/openstack-ci-puppet
# /blob/master/modules/gerrit/manifests/init.pp#L74
#Changeid|bug|blueprint
git_keywords = (r'(I[0-9a-f]{8,40})|'
'([Bb]ug|[Ll][Pp])[\s\#:]*(\d+)|'
'([Bb]lue[Pp]rint|[Bb][Pp])[\s\#:]*([A-Za-z0-9\\-]+)')
GIT_REGEX = re.compile(git_keywords)
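    #Illustrative matches (not from the original): 'I8d742c61' (a Change-Id),
    #'Bug #1234' or 'lp: 1234', and 'blueprint my-feature' or 'bp my-feature'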
error = False
#NOTE(jogo) if match regex but over 3 words, acceptable title
if GIT_REGEX.search(title) is not None and len(title.split()) <= 3:
print ("N801: git commit title ('%s') should provide an accurate "
"description of the change, not just a reference to a bug "
"or blueprint" % title.strip())
error = True
    if len(title.decode('utf-8')) > 72:
        print ("N802: git commit title ('%s') should be under 72 chars"
                % title.strip())
        error = True
error = True
return error
if __name__ == "__main__":
#include nova path
sys.path.append(os.getcwd())
#Run once tests (not per line)
once_error = once_git_check_commit_title()
#NOVA error codes start with an N
pep8.ERRORCODE_REGEX = re.compile(r'[EWN]\d{3}')
add_nova()
pep8.current_file = current_file
pep8.readlines = readlines
pep8.StyleGuide.excluded = excluded
pep8.StyleGuide.input_dir = input_dir
try:
pep8._main()
sys.exit(once_error)
finally:
if len(_missingImport) > 0:
print >> sys.stderr, ("%i imports missing in this test environment"
% len(_missingImport))
|
{
"content_hash": "a43bdfd83f6cab05922000531dac9f8a",
"timestamp": "",
"source": "github",
"line_count": 469,
"max_line_length": 79,
"avg_line_length": 34.42004264392324,
"alnum_prop": 0.5689772656879143,
"repo_name": "fajoy/nova",
"id": "a860aa37b4c5fba33e883adae89b65fb4a18a159",
"size": "16843",
"binary": false,
"copies": "1",
"ref": "refs/heads/grizzly-2",
"path": "tools/hacking.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7567423"
},
{
"name": "Shell",
"bytes": "15428"
}
],
"symlink_target": ""
}
|
from __future__ import division
from laspy.file import File
import numpy as np
import pandas as pd
import time, math
def timing(f):
def wrap(*args):
time1 = time.time()
ret = f(*args)
time2 = time.time()
        print('%s function took %0.3f ms' % (f.__name__, (time2-time1)*1000.0))
return ret
return wrap
@timing
def loadLAS2XYZ(filepath):
'''
Function to load in console the pointcloud of a LAS file
:param filepath: filepath of the LAS file
:return: xyz array containing coordinate of the points
'''
print('Start loading...')
inFile = File(filepath, mode='r')
coords = np.vstack((inFile.x, inFile.y, inFile.z)).transpose()
print('Data loaded')
return coords
@timing
def loadLAS2XYZAIR(filepath):
'''
Function to load in console the pointcloud of a LAS file with points attributes
:param filepath: filepath of the LAS file
:return: xyz array containing coordinate of the points
'''
print('Start loading...')
inFile = File(filepath, mode='r')
coords = np.vstack((inFile.x, inFile.y, inFile.z, inFile.amplitude, inFile.Intensity, inFile.reflectance, inFile.num_returns)).transpose()
print('Data loaded')
return coords
def xyz2binarray(xyz, xstart, xend, ystart, yend, nx=1000, ny=1000, method='min'):
'''
    Function to extract a grid of point cloud statistics projected on the XY-plane
    :param xyz: a 3 column matrix containing the point locations in a cartesian coordinate system
    :param xstart: x-minimum of the grid
    :param xend: x-maximum of the grid
    :param ystart: y-minimum of the grid
    :param yend: y-maximum of the grid
    :param nx: number of grid cells in the x direction
    :param ny: number of grid cells in the y direction
    :param method: statistic to extract from each grid cell ('min', 'max', 'mean', 'median' or 'count')
    :return: a 2D array, xmin, and ymax
TO IMPLEMENT:
- being able to choose to input dx dy instead of nx ny
'''
binned, bins_x, bins_y, bin_xmin, bin_ymin = binData2D(xyz, xstart, xend, ystart, yend, nx, ny)
if method == 'min':
ret = binned.Z.min().unstack().T # .iloc[::-1]
elif method == 'max':
ret = binned.Z.max().unstack().T # .iloc[::-1]
elif method == 'mean':
ret = binned.Z.mean().unstack().T # .iloc[::-1]
elif method == 'median':
ret = binned.Z.median().unstack().T # .iloc[::-1]
    elif method == 'count':
        ret = binned.Z.count().unstack().T  # .iloc[::-1]
    else:
        raise ValueError("method must be 'min', 'max', 'mean', 'median' or 'count'")
xmin = bins_x[ret.columns.min().astype(int)]
ymax = bins_y[ret.index.get_values().max().astype(int)]
newIndy = np.arange(ret.index.get_values().min(), ret.index.get_values().max() + 1)
newIndx = np.arange(ret.columns.min(), ret.columns.max() + 1)
a = ret.reindex(newIndy, newIndx)
mat = np.zeros((ny, nx)) * np.nan
mat[bin_ymin:bin_ymin + a.shape[0], bin_xmin:bin_xmin + a.shape[1]] = a
return mat[::-1], xmin, ymax
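# A minimal usage sketch (illustrative, not part of the original module): bin a
# small synthetic cloud into a 10x10 grid of minimum elevations. Assumes the
# numpy/pandas versions this module was written against.
def _example_xyz2binarray():
    xyz = np.random.rand(1000, 3) * 100  # synthetic points in a 100x100 box
    dem, xmin, ymax = xyz2binarray(xyz, 0, 100, 0, 100, nx=10, ny=10,
                                   method='min')
    print(dem.shape, xmin, ymax)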
def LAS2txt(filepath, newfile):
    '''
    Function to convert a pointcloud saved in LAS format into a .txt format
:param filepath: filepath of the LAS file
:param newfile: name of the new file
:return: save data into a text file
'''
inFile = File(filepath, mode='r')
coords = np.vstack((inFile.x, inFile.y, inFile.z)).transpose()
    if newfile[-4:] != '.txt':
newfile = newfile + '.txt'
np.savetxt(newfile,coords)
print('File saved: ' + newfile)
def xyz_subsample(xyz, length_out):
'''
    Function to subsample a 3 column matrix.
    :param xyz: 3 column matrix
    :param length_out: number of samples to output
:return: a 3 column matrix
'''
ind = np.random.randint(0,xyz.shape[0],length_out)
xyz_new = xyz[ind,:]
print('xyz subsampled!')
return xyz_new
def xyz_stat(xyz):
print('Shape of array: ' + str(xyz.shape))
print('Min of xyz: ')
print(np.min(xyz, axis=0))
print('Max of xyz: ')
print(np.max(xyz, axis=0))
print('Mean of xyz: ')
print(np.mean(xyz, axis=0))
print('Extent')
print(np.max(xyz, axis=0)-np.min(xyz, axis=0))
def trans(xyz,trans_vec):
'''
Function to translate an xyz 3 column matrix
:param xyz: a 3 column matrix
:param trans_vec: a translation vector of length 3
    :return: the translated 3 column matrix
'''
xyz[:,0] = xyz[:,0] - trans_vec[0]
xyz[:,1] = xyz[:,1] - trans_vec[1]
xyz[:,2] = xyz[:,2] - trans_vec[2]
return xyz
def translate_coords(coords, xyz_trans = None ,ask = True):
'''
Function to translate a point cloud
:param coords: an xyz array
:param xyz_trans: vector of translation in [x,y,z]
:param ask: if True (default) brings an interactive console for approving the translation
:return: translated xyz array
'''
if xyz_trans is None:
xyz_trans = [coords[:,0].min(), coords[:,1].min(), coords[:,2].min()]
if ask is True:
print('Default translation:')
print(str(xyz_trans) + '\n')
        res = int(input('Do you want to translate? 0/1 '))
        if res == 0:
            print('No Translation applied')
            return None
        if res == 1:
            return trans(coords, xyz_trans)
if ask is not True:
return trans(coords, xyz_trans)
def truncate(xyz, Xextent, Yextent):
'''
Function to truncate a point cloud with a rectangular shape
    :param xyz: a 3 column matrix containing the point coordinates
    :param Xextent: a vector of Xmin and Xmax (e.g. [Xmin, Xmax])
    :param Yextent: a vector of Ymin and Ymax (e.g. [Ymin, Ymax])
    :return: a 3 column matrix containing the point coordinates within the specified rectangle
'''
xcut = xyz[xyz[:,0]>=Xextent[0]]
xcut1 = xcut[xcut[:,0]<Xextent[1]]
ycut = xcut1[xcut1[:,1]>=Yextent[0]]
ycut1 = ycut[ycut[:,1]<Yextent[1]]
return ycut1
def cart2cyl(xyz, xy_axis=None):
'''
    Function to convert cartesian coordinates to cylindrical
    :param xyz: a 3-column matrix containing the point coordinates expressed in a cartesian system
    :param xy_axis: an array with the x and y coordinates of the center of the new cylindrical coordinate system
    :return: a 3-column matrix with the point coordinates expressed in a cylindrical coordinate system
'''
if xy_axis is not None:
xyz[:,0] = xyz[:,0] - xy_axis[0]
xyz[:,1] = xyz[:,1] - xy_axis[1]
rho = np.sqrt(xyz[:,0]**2 + xyz[:,1]**2)
phi = np.arctan2(xyz[:,1], xyz[:,0])
rpz = np.vstack((rho,phi,xyz[:,2]))
return rpz.transpose()
def cyl2cart(rpz):
'''
    Function to convert cylindrical coordinates to cartesian
:param rpz: a 3-column matrix containing the points coordinates expressed in a cylindrical system
:return: a 3-column matrix containing the points coordinates expressed in a cartesian system
'''
x = rpz[:,0] * np.cos(rpz[:,1])
y = rpz[:,0] * np.sin(rpz[:,1])
xyz = np.vstack((x,y,rpz[:,2]))
return xyz.transpose()
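# Round-trip sketch (illustrative, not part of the original module): converting
# to cylindrical coordinates and back should recover the input; note cart2cyl
# recenters (and mutates) the input when xy_axis is given.
def _example_cyl_roundtrip():
    xyz = np.random.rand(5, 3)
    rpz = cart2cyl(xyz.copy())  # no xy_axis, so no recentering
    assert np.allclose(cyl2cart(rpz), xyz)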
def rotate_cloud(xyz, angle, center_coord=None):
'''
Function to rotate a point cloud
:param xyz: n*3 array containing point cloud coordinates in a cartesian system
    :param angle: angle of rotation in radians (callers working in degrees should convert first, as get_slice() does)
    :param center_coord: tuple with the xy coordinates of the center of rotation. Default is None
:return: the rotated xyz point cloud
'''
if center_coord is None:
center_coord = [np.mean(xyz[:,0]),np.mean(xyz[:,1])]
rpz = cart2cyl(xyz, xy_axis=center_coord)
rpz[:,1] = rpz[:,1] + angle
xyz = cyl2cart(rpz)
return xyz
def get_slice(xyz, thick, dir=0, center_coord=None):
'''
Function to extract a slice of the point cloud xyz
:param xyz: n*3 array containing point cloud coordinates in a cartesian system
:param thick: thickness of the slice
:param dir: direction of the slice in degrees (default is 0)
:param center_coord: tuple with xy coordinates of the center of rotation. Default is None
:return: return slice in xyz format.
'''
if center_coord is None:
center_coord = [np.mean(xyz[:,0]),np.mean(xyz[:,1])]
print(center_coord)
    if dir % 180 != 0:
        xyz = rotate_cloud(xyz, (dir*math.pi/180), center_coord=center_coord)
    else:
        # no rotation needed, but still center the cloud on center_coord so the
        # slice is taken around it, consistent with get_slice_df()
        xyz = trans(xyz, [center_coord[0], center_coord[1], 0])
    myslice = xyz[xyz[:,0]>=-(thick/2)]
    myslice = myslice[myslice[:,0]<=(thick/2)]
return myslice
def get_slice_df(df_xyz, thick, dir=0, center_coord=None):
'''
Function to extract a slice of points from a dataframe
    :param df_xyz: dataframe with 'x', 'y' and 'z' columns containing point cloud coordinates in a cartesian system
:param thick: thickness of the slice
:param dir: direction of the slice in degrees (default is 0)
:param center_coord: tuple with xy coordinates of the center of rotation. Default is None
:return: return slice in xyz format.
'''
df = df_xyz.copy()
df_xyz=None
if center_coord is None:
center_coord = [df['x'].mean(),df['y'].mean()]
print(center_coord)
if dir % 180 != 0:
xyz = rotate_cloud(np.array(df[['x','y','z']]), (dir*math.pi/180), center_coord = center_coord)
df[['x','y']] = xyz[:,[0,1]]
        myslice = df[df.x >= -(thick / 2)]
        myslice = myslice[myslice.x <= (thick / 2)]
else:
myslice = df[df.x >= (center_coord[0] - thick / 2)]
        myslice = myslice[myslice.x <= (center_coord[0] + thick / 2)]
myslice['x'] = myslice['x'] - center_coord[0]
myslice['y'] = myslice['y'] - center_coord[1]
print('Data Sliced')
return myslice
def center_pc_coord_df(df_xyz, center_coord=None):
if center_coord is None:
center_coord = [(df_xyz['x'].max()-df_xyz['x'].min())/2 + df_xyz['x'].min(),
(df_xyz['y'].max()-df_xyz['y'].min())/2 +df_xyz['y'].min()]
print(center_coord)
df_xyz['x'] = df_xyz['x'] - center_coord[0]
df_xyz['y'] = df_xyz['y'] - center_coord[1]
return df_xyz
@timing
def binData2D(myXYZ, xstart, xend, ystart, yend, nx, ny):
'''
    Function to bin a scattered point cloud (xyz) into a 2d array
    :param myXYZ: xyz array containing the point cloud coordinates
    :param xstart: x-minimum of the grid
    :param xend: x-maximum of the grid
    :param ystart: y-minimum of the grid
    :param yend: y-maximum of the grid
    :param nx: number of cells along the x-axis
    :param ny: number of cells along the y-axis
    :return: a group object (pandas library) with all points classified into bins
'''
    # note, the division requires: from __future__ import division
x = myXYZ[:,0].ravel()
y = myXYZ[:,1].ravel()
z = myXYZ[:,2].ravel()
df = pd.DataFrame({'X' : x , 'Y' : y , 'Z' : z})
bins_x = np.linspace(xstart, xend, nx+1)
x_cuts = pd.cut(df.X,bins_x, labels=False)
bins_y = np.linspace(ystart,yend, ny+1)
y_cuts = pd.cut(df.Y,bins_y, labels=False)
bin_xmin, bin_ymin = x_cuts.min(), y_cuts.min()
    print('Data cut in a ' + str(len(bins_x)) + ' by ' + str(len(bins_y)) + ' matrix')
dx = (xend - xstart)/nx
dy = (yend - ystart)/ny
print('dx = ' + str(dx) + ' ; dy = ' + str (dy))
grouped = df.groupby([x_cuts,y_cuts])
print('Data grouped, \nReady to go!!')
return grouped, bins_x, bins_y, int(bin_xmin), int(bin_ymin)
#=====================================================================
#=====================================================================
# Function in PROGRESS !!! Use at your own risk
#===================================================================
@timing
def binData3D(xyz,xstart, xend, ystart, yend, zstart, zend,nx,ny,nz):
# not ready !!!!
x = xyz[:,0].ravel()
y = xyz[:,1].ravel()
z = xyz[:,2].ravel()
df = pd.DataFrame({'X' : x , 'Y' : y , 'Z' : z})
bins_x = np.linspace(xstart,xend,nx)
x_cuts = pd.cut(df.X,bins_x, labels=False)
bins_y = np.linspace(ystart,yend,ny)
y_cuts = pd.cut(df.Y,bins_y, labels=False)
bins_z = np.linspace(zstart, zend, nz)
z_cuts = pd.cut(df.Z,bins_z, labels=False)
    print('Data cut in a ' + str(len(bins_x)) + ' by ' + str(len(bins_y)) +
          ' by ' + str(len(bins_z)) + ' matrix')
    dx = (xend - xstart)/nx
    dy = (yend - ystart)/ny
    dz = (zend - zstart)/nz
    print('dx = ' + str(dx) + ' ; dy = ' + str(dy) + ' ; dz = ' + str(dz))
    # create a 3D array sized by the number of bins (pd.cut with n edges
    # yields n-1 intervals labeled 0 .. n-2), not the number of points
    my3d = np.zeros((ny - 1, nx - 1, nz - 1)) * np.nan
    # loop through the vertical layers of the point cloud and extract a 2d array for each
    for i in np.arange(z_cuts.min(), z_cuts.max() + 1):
        subdf = df[z_cuts == i]
        # count points per (x, y) cell of this layer (a placeholder statistic
        # while the function is unfinished) and unstack into a 2d array
        my2d = subdf.groupby([x_cuts, y_cuts]).Z.count().unstack()
        my2d = my2d.reindex(index=np.arange(nx - 1), columns=np.arange(ny - 1))
        # add the 2d array to the 3d array
        my3d[:, :, int(i)] = np.array(my2d).T
print('Data grouped, \nReady to go!!')
return my3d #3D array
# def cart2sphere():
# # write function to convert xyz point coordinates to spherical coordinates
#
# def sphere2cart():
# # write the reverse operation of cart2sphere()
|
{
"content_hash": "ffebe48b7eae244011f761cc7abd6df8",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 142,
"avg_line_length": 37.85087719298246,
"alnum_prop": 0.6047122441096948,
"repo_name": "ArcticSnow/dempy",
"id": "4a43fd1adc81b5d0235d6e60129b9448ec625da5",
"size": "12945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dempy/pointcloud.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "4709"
},
{
"name": "Python",
"bytes": "95257"
},
{
"name": "Shell",
"bytes": "17216"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from collections.abc import Iterable
import confluent_kafka
from config import get_kafka_args
from db.models import WeiboData
from logger import crawler
conf = get_kafka_args()
data_type_dict = {'dataset': 0, 'datastream': 1, 'keyword': 2, 'home': 3}
def produce_data_list(topic: str, data_type: str, data_id: int, data_list: Iterable, area: str = ''):
p = confluent_kafka.Producer(conf)
for weibo_item in data_list:
produce_data(p, topic, data_type, data_id, weibo_item, area)
kafka_flush(p)
def produce_data(p, topic: str, data_type: str, data_id: int, data: WeiboData, area: str = ''):
p.poll(0)
p.produce(topic, __serialize(data_type_dict[data_type], data_id, data, area), callback=delivery_report)
def delivery_report(err, msg):
""" Called once for each message produced to indicate delivery result.
Triggered by poll() or flush(). """
if err is not None:
crawler.error(f'Kafka message delivery failed: {err}')
else:
crawler.info(f'Kafka message delivered to {msg.topic()} [{msg.partition()}]')
def kafka_flush(p):
return p.flush()
def __serialize(data_type: int, data_id: int, weibo_item: WeiboData, area=''):
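    # Serialized layout (big-endian), as implied by the fields assembled below:
    #   1 byte data_type | 4 bytes data_id | 2 bytes len(content) |
    #   2 bytes len(location) | 1 byte len(area) | weibo_id bytes |
    #   content bytes | location bytes | area bytes | 8 bytes timestamp (ms)
    # Note the weibo_id length itself is not encoded; consumers evidently
    # rely on it being fixed-width.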
mid_bytes = bytes(weibo_item.weibo_id, encoding='utf-8')
content_bytes = bytes(weibo_item.weibo_cont, encoding='utf-8')
location_bytes = bytes(weibo_item.weibo_location, encoding='utf-8')
area_bytes = bytes(area, encoding='utf-8')
time = int(datetime.timestamp(weibo_item.create_time) * 1000)
time_bytes = time.to_bytes(length=8, byteorder='big')
data_bytes = [data_type.to_bytes(length=1, byteorder='big'),
(data_id.to_bytes(length=4, byteorder='big')),
(len(content_bytes).to_bytes(length=2, byteorder='big')),
(len(location_bytes).to_bytes(length=2, byteorder='big')),
(len(area_bytes).to_bytes(length=1, byteorder='big')),
mid_bytes,
content_bytes,
location_bytes,
area_bytes,
time_bytes]
return b''.join(data_bytes)
if __name__ == '__main__':
weibo_item_test = WeiboData()
weibo_item_test.weibo_cont = '#地震预警# 据成都高新减灾研究所,15:57 四川芦山(103.0,' \
'30.4)发生预警震级4.5级的地震,预警中心提前12秒向成都预警,预估烈度2.0度。此预警信息于震中发震后几秒内发出,最终地震参数以7-10' \
'分钟后中国地震台网发布的正式数据为准。用户可下载“地震预警”(ICL)手机软件接收地震预警。 \u200B\u200B\u200B\u200B'
weibo_item_test.weibo_location = ''
weibo_item_test.weibo_id = '4367530648576413'
weibo_item_test.create_time = datetime.fromtimestamp(1556783914)
s = __serialize(2, 1, weibo_item_test)
h = s.hex()
print(len(s))
print(h)
|
{
"content_hash": "327929aba6bc686b018f7b646124a7a5",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 108,
"avg_line_length": 39.73913043478261,
"alnum_prop": 0.6265499635302699,
"repo_name": "yzsz/weibospider",
"id": "9e455d6d3289285fab954c27ac0d27cb0d5c6e8a",
"size": "2970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kafka/producer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1401"
},
{
"name": "Python",
"bytes": "187164"
},
{
"name": "Shell",
"bytes": "623"
}
],
"symlink_target": ""
}
|
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.backend.interpreter import Value, TupleValue, TensorValue
from tvm.relay.backend.interpreter import RefValue, ConstructorValue
from tvm.relay.scope_builder import ScopeBuilder
from tvm.relay import testing, create_executor
def check_eval(expr, args, expected_result, mod=None, rtol=1e-07):
# TODO(tqchen) add more types once the schedule register is fixed.
for target in ["llvm"]:
ctx = tvm.context(target, 0)
if not ctx.exist:
return
intrp = create_executor(mod=mod, ctx=ctx, target=target)
result = intrp.evaluate(expr)(*args)
# use tvm.testing which also set atol
tvm.testing.assert_allclose(
result.asnumpy(), expected_result, rtol=rtol)
def test_from_scalar():
np.testing.assert_allclose(Value.from_scalar(1, 'int32').asnumpy(), 1)
np.testing.assert_allclose(Value.from_scalar(10.0, 'float32').asnumpy(), 10.0)
np.testing.assert_allclose(Value.from_scalar(True).asnumpy(), True)
def test_tuple_value():
tv = TupleValue(Value.from_scalar(
1), Value.from_scalar(2), Value.from_scalar(3))
np.testing.assert_allclose(tv[0].asnumpy(), 1)
np.testing.assert_allclose(tv[1].asnumpy(), 2)
np.testing.assert_allclose(tv[2].asnumpy(), 3)
def test_tuple_getitem():
func = relay.Function([], relay.TupleGetItem(relay.Tuple([relay.const(1), relay.const(2)]), 0))
check_eval(func, [], 1)
def test_id():
x = relay.var('x', 'float32')
ident = relay.Function([x], x)
one = np.array(1.0, 'float32')
check_eval(ident, [one], one)
def test_add_const():
two = relay.add(relay.const(1), relay.const(1))
func = relay.Function([], two)
check_eval(func, [], 2)
def test_mul_param():
x = relay.var('x', shape=(10, 10))
y = relay.var('y', shape=(1, 10))
func = relay.Function([x, y], relay.multiply(x, y))
x_data = np.random.rand(10, 10).astype('float32')
y_data = np.random.rand(1, 10).astype('float32')
check_eval(func, [x_data, y_data], x_data * y_data)
def test_equal():
i = relay.var('i', shape=[], dtype='int32')
    j = relay.var('j', shape=[], dtype='int32')
z = relay.equal(i, j)
func = relay.Function([i, j], z, ret_type=relay.TensorType([], 'bool'))
i_data = relay.const(0, 'int32')
j_data = relay.const(0, 'int32')
check_eval(func, [i_data, j_data], True)
def test_subtract():
i = relay.var('i', shape=[], dtype='int32')
sub = relay.subtract(i, relay.const(1, dtype='int32'))
func = relay.Function([i], sub, ret_type=relay.TensorType([], 'int32'))
i_data = np.array(1, dtype='int32')
check_eval(func, [i_data], 0)
def test_simple_loop():
mod = relay.module.Module({})
sum_up = relay.GlobalVar('sum_up')
i = relay.var('i', shape=[], dtype='int32')
sb = ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, dtype='int32'))):
sb.ret(i)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, dtype='int32'))
rec_call = relay.Call(sum_up, [one_less])
sb.ret(relay.add(rec_call, i))
func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], 'int32'))
mod[sum_up] = func
i_data = np.array(10, dtype='int32')
check_eval(sum_up, [i_data], sum(range(1, 11)), mod=mod)
def test_loop():
mod = relay.module.Module({})
sum_up = relay.GlobalVar('sum_up')
i = relay.var('i', shape=[], dtype='int32')
accum = relay.var('accum', shape=[], dtype='int32')
sb = ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, 'int32'))):
sb.ret(accum)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, 'int32'))
new_accum = relay.add(accum, i)
sb.ret(relay.Call(sum_up, [one_less, new_accum]))
func = relay.Function([i, accum], sb.get())
mod[sum_up] = func
i_data = np.array(10, dtype='int32')
accum_data = np.array(0, dtype='int32')
check_eval(sum_up, [i_data, accum_data], sum(range(1, 11)), mod=mod)
def test_ref():
mod = relay.Module()
three_with_ref = relay.GlobalVar('three_with_ref')
i = relay.Var('i')
iv = relay.Var('iv')
u = relay.Var('u')
uv = relay.Var('uv')
body = relay.add(iv, uv)
body = relay.Let(uv, relay.RefRead(i), body)
body = relay.Let(u, relay.RefWrite(i, relay.const(2)), body)
body = relay.Let(iv, relay.RefRead(i), body)
body = relay.Let(i, relay.RefCreate(relay.const(1)), body)
mod[three_with_ref] = relay.Function([], body)
check_eval(three_with_ref, [], 3, mod=mod)
def test_binds():
x = relay.var("x")
y = relay.add(x, x)
intrp = create_executor("debug")
xx = np.ones((10, 20))
res = intrp.evaluate(y, binds={x: xx}).asnumpy()
tvm.testing.assert_allclose(xx + xx, res)
def test_tensor_value():
x = relay.var("x", shape=(1, 10))
xx = np.ones((1, 10)).astype("float32")
check_eval(relay.Function([x], x), [TensorValue(xx)], xx)
def test_kwargs_params():
x = relay.var("x", shape=(1, 10))
y = relay.var("y", shape=(1, 10))
z = relay.var("z", shape=(1, 10))
f = relay.Function([x, y, z], x + y + z)
x_data = np.random.rand(1, 10).astype('float32')
y_data = np.random.rand(1, 10).astype('float32')
z_data = np.random.rand(1, 10).astype('float32')
params = { 'y': y_data, 'z': z_data }
intrp = create_executor("debug")
res = intrp.evaluate(f)(x_data, **params).data
tvm.testing.assert_allclose(res.asnumpy(), x_data + y_data + z_data)
def test_function_taking_adt_ref_tuple():
mod = relay.Module()
prelude = relay.prelude.Prelude(mod)
intrp = create_executor("debug", mod)
nil_value = ConstructorValue(prelude.nil.tag, [], prelude.nil)
cons_value = ConstructorValue(prelude.cons.tag, [
TensorValue(np.random.rand(1, 10).astype('float32')),
nil_value
], prelude.cons)
ref_value = RefValue(TensorValue(np.random.rand(1, 10).astype('float32')))
tuple_value = TupleValue(*[
TensorValue(np.random.rand(1, 10).astype('float32')) for _ in range(10)
])
id_func = intrp.evaluate(prelude.id)
res_nil = id_func(nil_value)
assert res_nil.tag == nil_value.tag
assert len(res_nil.fields) == 0
res_cons = id_func(cons_value)
assert res_cons.tag == cons_value.tag
assert len(res_cons.fields) == len(cons_value.fields)
tvm.testing.assert_allclose(res_cons.fields[0].asnumpy(),
cons_value.fields[0].asnumpy())
assert isinstance(res_cons.fields[1], ConstructorValue)
assert res_cons.fields[1].tag == prelude.nil.tag
assert len(res_cons.fields[1].fields) == 0
res_ref = id_func(ref_value)
tvm.testing.assert_allclose(res_ref.value.asnumpy(), ref_value.value.asnumpy())
res_tuple = id_func(tuple_value)
for i in range(10):
tvm.testing.assert_allclose(res_tuple.fields[i].asnumpy(),
tuple_value.fields[i].asnumpy())
def test_tuple_passing():
x = relay.var('x', type_annotation=relay.ty.TupleType([
relay.ty.TensorType((), 'int64'),
relay.ty.TensorType((), 'int64')]))
fn = relay.Function([x], relay.expr.TupleGetItem(x, 0))
mod = relay.Module({})
gv = relay.GlobalVar('main')
mod[gv] = fn
mod = relay.transform.InferType()(mod)
ctx = tvm.cpu()
target = tvm.target.create('llvm')
    executor = relay.create_executor(mod=mod, ctx=ctx, target=target)
    f = executor.evaluate(gv)
# First use a Python tuple.
out = f((10, 8))
tvm.testing.assert_allclose(out.asnumpy(), np.array(10))
# Second use a tuple value.
value_tuple = TupleValue(
TensorValue(np.array(11)),
TensorValue(np.array(12)))
out = f(value_tuple)
tvm.testing.assert_allclose(out.asnumpy(), np.array(11))
if __name__ == "__main__":
test_id()
test_add_const()
test_equal()
test_subtract()
test_simple_loop()
test_loop()
test_binds()
test_kwargs_params()
test_ref()
test_tensor_value()
test_tuple_value()
test_tuple_getitem()
test_function_taking_adt_ref_tuple()
test_tuple_passing()
|
{
"content_hash": "0d0ff4db873ca94dd6273313bad21aff",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 99,
"avg_line_length": 34.06995884773662,
"alnum_prop": 0.614325401618553,
"repo_name": "Huyuwei/tvm",
"id": "c1a19c4d9bb19d66e20c431b0f631277fda24347",
"size": "9064",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/python/relay/test_backend_interpreter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6056"
},
{
"name": "C",
"bytes": "95567"
},
{
"name": "C++",
"bytes": "5569606"
},
{
"name": "CMake",
"bytes": "67305"
},
{
"name": "Go",
"bytes": "112376"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "173219"
},
{
"name": "JavaScript",
"bytes": "49801"
},
{
"name": "Makefile",
"bytes": "50818"
},
{
"name": "Objective-C",
"bytes": "15264"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "6775044"
},
{
"name": "Rust",
"bytes": "182027"
},
{
"name": "Scala",
"bytes": "184105"
},
{
"name": "Shell",
"bytes": "96633"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
}
|
"""distutils.command.build_ext
Implements the Distutils 'build_ext' command, for building extension
modules (currently limited to C extensions, should accommodate C++
extensions ASAP)."""
__revision__ = "$Id: build_ext.py 65670 2008-08-14 07:35:13Z hirokazu.yamamoto $"
import sys, os, re
from site import USER_BASE, USER_SITE
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.dep_util import newer_group
from distutils.extension import Extension
from distutils.util import get_platform
from distutils import log
if os.name == 'nt':
from distutils.msvccompiler import get_build_version
MSVC_VERSION = int(get_build_version())
# An extension name is just a dot-separated list of Python NAMEs (ie.
# the same as a fully-qualified module name).
extension_name_re = re.compile \
(r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
def show_compilers ():
from distutils.ccompiler import show_compilers
show_compilers()
class build_ext(Command):
description = "build C/C++ extensions (compile/link to build directory)"
# XXX thoughts on how to deal with complex command-line options like
# these, i.e. how to make it so fancy_getopt can suck them off the
# command line and make it look like setup.py defined the appropriate
# lists of tuples of what-have-you.
# - each command needs a callback to process its command-line options
# - Command.__init__() needs access to its share of the whole
# command line (must ultimately come from
# Distribution.parse_command_line())
# - it then calls the current command class' option-parsing
# callback to deal with weird options like -D, which have to
# parse the option text and churn out some custom data
# structure
# - that data structure (in this case, a list of 2-tuples)
# will then be present in the command object by the time
# we get to finalize_options() (i.e. the constructor
# takes care of both command-line and client options
# in between initialize_options() and finalize_options())
sep_by = " (separated by '%s')" % os.pathsep
user_options = [
('build-lib=', 'b',
"directory for compiled extension modules"),
('build-temp=', 't',
"directory for temporary files (build by-products)"),
('plat-name=', 'p',
"platform name to cross-compile for, if supported "
"(default: %s)" % get_platform()),
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules"),
('include-dirs=', 'I',
"list of directories to search for header files" + sep_by),
('define=', 'D',
"C preprocessor macros to define"),
('undef=', 'U',
"C preprocessor macros to undefine"),
('libraries=', 'l',
"external C libraries to link with"),
('library-dirs=', 'L',
"directories to search for external C libraries" + sep_by),
('rpath=', 'R',
"directories to search for shared C libraries at runtime"),
('link-objects=', 'O',
"extra explicit link objects to include in the link"),
('debug', 'g',
"compile/link with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
('swig-cpp', None,
"make SWIG create C++ files (default is C)"),
('swig-opts=', None,
"list of SWIG command line options"),
('swig=', None,
"path to the SWIG executable"),
('user', None,
"add user include, library and rpath"),
]
boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options(self):
self.extensions = None
self.build_lib = None
self.plat_name = None
self.build_temp = None
self.inplace = 0
self.package = None
self.include_dirs = None
self.define = None
self.undef = None
self.libraries = None
self.library_dirs = None
self.rpath = None
self.link_objects = None
self.debug = None
self.force = None
self.compiler = None
self.swig = None
self.swig_cpp = None
self.swig_opts = None
self.user = None
def finalize_options(self):
from distutils import sysconfig
self.set_undefined_options('build',
('build_lib', 'build_lib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'),
('plat_name', 'plat_name'),
)
if self.package is None:
self.package = self.distribution.ext_package
self.extensions = self.distribution.ext_modules
# Make sure Python's include directories (for Python.h, pyconfig.h,
# etc.) are in the include search path.
py_include = sysconfig.get_python_inc()
plat_py_include = sysconfig.get_python_inc(plat_specific=1)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
# Put the Python "system" include dir at the end, so that
# any local include dirs take precedence.
self.include_dirs.append(py_include)
if plat_py_include != py_include:
self.include_dirs.append(plat_py_include)
if isinstance(self.libraries, str):
self.libraries = [self.libraries]
# Life is easier if we're not forever checking for None, so
# simplify these options to empty lists if unset
if self.libraries is None:
self.libraries = []
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, str):
self.library_dirs = self.library_dirs.split(os.pathsep)
if self.rpath is None:
self.rpath = []
elif isinstance(self.rpath, str):
self.rpath = self.rpath.split(os.pathsep)
# for extensions under windows use different directories
# for Release and Debug builds.
# also Python's library directory must be appended to library_dirs
if os.name == 'nt':
# the 'libs' directory is for binary installs - we assume that
# must be the *native* platform. But we don't really support
# cross-compiling via a binary install anyway, so we let it go.
self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
if self.debug:
self.build_temp = os.path.join(self.build_temp, "Debug")
else:
self.build_temp = os.path.join(self.build_temp, "Release")
# Append the source distribution include and library directories,
# this allows distutils on windows to work in the source tree
self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC'))
if MSVC_VERSION == 9:
# Use the .lib files for the correct architecture
if self.plat_name == 'win32':
suffix = ''
else:
# win-amd64 or win-ia64
suffix = self.plat_name[4:]
new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
if suffix:
new_lib = os.path.join(new_lib, suffix)
self.library_dirs.append(new_lib)
elif MSVC_VERSION == 8:
self.library_dirs.append(os.path.join(sys.exec_prefix,
'PC', 'VS8.0', 'win32release'))
elif MSVC_VERSION == 7:
self.library_dirs.append(os.path.join(sys.exec_prefix,
'PC', 'VS7.1'))
else:
self.library_dirs.append(os.path.join(sys.exec_prefix,
'PC', 'VC6'))
# OS/2 (EMX) doesn't support Debug vs Release builds, but has the
# import libraries in its "Config" subdirectory
if os.name == 'os2':
self.library_dirs.append(os.path.join(sys.exec_prefix, 'Config'))
# for extensions under Cygwin and AtheOS Python's library directory must be
# appended to library_dirs
if sys.platform[:6] == 'cygwin' or sys.platform[:6] == 'atheos':
if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
# building third party extensions
self.library_dirs.append(os.path.join(sys.prefix, "lib",
"python" + get_python_version(),
"config"))
else:
# building python standard extensions
self.library_dirs.append('.')
# for extensions under Linux with a shared Python library,
# Python's library directory must be appended to library_dirs
if (sys.platform.startswith('linux') or sys.platform.startswith('gnu')) \
and sysconfig.get_config_var('Py_ENABLE_SHARED'):
if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
# building third party extensions
self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
else:
# building python standard extensions
self.library_dirs.append('.')
# The argument parsing will result in self.define being a string, but
# it has to be a list of 2-tuples. All the preprocessor symbols
# specified by the 'define' option will be set to '1'. Multiple
# symbols can be separated with commas.
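        # e.g. "--define FOO,BAR" becomes [('FOO', '1'), ('BAR', '1')]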
if self.define:
defines = self.define.split(',')
self.define = [(symbol, '1') for symbol in defines]
# The option for macros to undefine is also a string from the
# option parsing, but has to be a list. Multiple symbols can also
# be separated with commas here.
if self.undef:
self.undef = self.undef.split(',')
if self.swig_opts is None:
self.swig_opts = []
else:
self.swig_opts = self.swig_opts.split(' ')
# Finally add the user include and library directories if requested
if self.user:
user_include = os.path.join(USER_BASE, "include")
user_lib = os.path.join(USER_BASE, "lib")
if os.path.isdir(user_include):
self.include_dirs.append(user_include)
if os.path.isdir(user_lib):
self.library_dirs.append(user_lib)
self.rpath.append(user_lib)
def run(self):
from distutils.ccompiler import new_compiler
# 'self.extensions', as supplied by setup.py, is a list of
# Extension instances. See the documentation for Extension (in
# distutils.extension) for details.
#
# For backwards compatibility with Distutils 0.8.2 and earlier, we
# also allow the 'extensions' list to be a list of tuples:
# (ext_name, build_info)
# where build_info is a dictionary containing everything that
# Extension instances do except the name, with a few things being
# differently named. We convert these 2-tuples to Extension
# instances as needed.
if not self.extensions:
return
# If we were asked to build any C/C++ libraries, make sure that the
# directory where we put them is in the library search path for
# linking extensions.
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.libraries.extend(build_clib.get_library_names() or [])
self.library_dirs.append(build_clib.build_clib)
# Setup the CCompiler object that we'll use to do all the
# compiling and linking
self.compiler = new_compiler(compiler=self.compiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
# If we are cross-compiling, init the compiler now (if we are not
# cross-compiling, init would not hurt, but people may rely on
# late initialization of compiler even if they shouldn't...)
if os.name == 'nt' and self.plat_name != get_platform():
self.compiler.initialize(self.plat_name)
# And make sure that any compile/link-related options (which might
# come from the command-line or from the setup script) are set in
# that CCompiler object -- that way, they automatically apply to
# all compiling and linking done here.
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
if self.libraries is not None:
self.compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
self.compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
self.compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
self.compiler.set_link_objects(self.link_objects)
# Now actually compile and link everything.
self.build_extensions()
def check_extensions_list(self, extensions):
"""Ensure that the list of extensions (presumably provided as a
command option 'extensions') is valid, i.e. it is a list of
Extension objects. We also support the old-style list of 2-tuples,
where the tuples are (ext_name, build_info), which are converted to
Extension instances here.
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise.
"""
if not isinstance(extensions, list):
raise DistutilsSetupError(
"'ext_modules' option must be a list of Extension instances")
        for i, ext in enumerate(extensions):
            if isinstance(ext, Extension):
                continue                # OK! (assume type-checking done
                                        # by Extension constructor)
            if not isinstance(ext, tuple) or len(ext) != 2:
                raise DistutilsSetupError(
                      "each element of 'ext_modules' option must be an "
                      "Extension instance or 2-tuple")
            (ext_name, build_info) = ext
            log.warn("old-style (ext_name, build_info) tuple found in "
                     "ext_modules for extension '%s' "
                     "-- please convert to Extension instance" % ext_name)
            if not (isinstance(ext_name, str) and
                    extension_name_re.match(ext_name)):
                raise DistutilsSetupError(
                      "first element of each tuple in 'ext_modules' "
                      "must be the extension name (a string)")
            if not isinstance(build_info, dict):
                raise DistutilsSetupError(
                      "second element of each tuple in 'ext_modules' "
                      "must be a dictionary (build info)")
# OK, the (ext_name, build_info) dict is type-safe: convert it
# to an Extension instance.
ext = Extension(ext_name, build_info['sources'])
# Easy stuff: one-to-one mapping from dict elements to
# instance attributes.
for key in ('include_dirs',
'library_dirs',
'libraries',
'extra_objects',
'extra_compile_args',
'extra_link_args'):
val = build_info.get(key)
if val is not None:
setattr(ext, key, val)
# Medium-easy stuff: same syntax/semantics, different names.
ext.runtime_library_dirs = build_info.get('rpath')
if 'def_file' in build_info:
log.warn("'def_file' element of build info dict "
"no longer supported")
# Non-trivial stuff: 'macros' split into 'define_macros'
# and 'undef_macros'.
macros = build_info.get('macros')
if macros:
ext.define_macros = []
ext.undef_macros = []
for macro in macros:
if not (isinstance(macro, tuple) and len(macro) in (1, 2)):
raise DistutilsSetupError(
"'macros' element of build info dict "
"must be 1- or 2-tuple")
if len(macro) == 1:
ext.undef_macros.append(macro[0])
elif len(macro) == 2:
ext.define_macros.append(macro)
extensions[i] = ext
def get_source_files(self):
self.check_extensions_list(self.extensions)
filenames = []
# Wouldn't it be neat if we knew the names of header files too...
for ext in self.extensions:
filenames.extend(ext.sources)
return filenames
def get_outputs(self):
# Sanity check the 'extensions' list -- can't assume this is being
# done in the same run as a 'build_extensions()' call (in fact, we
# can probably assume that it *isn't*!).
self.check_extensions_list(self.extensions)
# And build the list of output (built) filenames. Note that this
# ignores the 'inplace' flag, and assumes everything goes in the
# "build" tree.
outputs = []
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
outputs.append(os.path.join(self.build_lib,
self.get_ext_filename(fullname)))
return outputs
def build_extensions(self):
# First, sanity-check the 'extensions' list
self.check_extensions_list(self.extensions)
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
sources = ext.sources
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'ext_modules' option (extension '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % ext.name)
sources = list(sources)
fullname = self.get_ext_fullname(ext.name)
if self.inplace:
# ignore build-lib -- put the compiled extension into
# the source tree along with pure Python modules
modpath = fullname.split('.')
package = '.'.join(modpath[0:-1])
base = modpath[-1]
build_py = self.get_finalized_command('build_py')
package_dir = build_py.get_package_dir(package)
ext_filename = os.path.join(package_dir,
self.get_ext_filename(base))
else:
ext_filename = os.path.join(self.build_lib,
self.get_ext_filename(fullname))
depends = sources + ext.depends
if not (self.force or newer_group(depends, ext_filename, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
# First, scan the sources for SWIG definition files (.i), run
# SWIG on 'em to create .c files, and modify the sources list
# accordingly.
sources = self.swig_sources(sources, ext)
# Next, compile the source code to object files.
# XXX not honouring 'define_macros' or 'undef_macros' -- the
# CCompiler API needs to change to accommodate this, and I
# want to do one thing at a time!
# Two possible sources for extra compiler arguments:
# - 'extra_compile_args' in Extension object
# - CFLAGS environment variable (not particularly
# elegant, but people seem to expect it and I
# guess it's useful)
# The environment variable should take precedence, and
# any sensible compiler will give precedence to later
# command line args. Hence we combine them in order:
extra_args = ext.extra_compile_args or []
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=ext.include_dirs,
debug=self.debug,
extra_postargs=extra_args,
depends=ext.depends)
# XXX -- this is a Vile HACK!
#
# The setup.py script for Python on Unix needs to be able to
# get this list so it can perform all the clean up needed to
# avoid keeping object files around when cleaning out a failed
# build of an extension module. Since Distutils does not
# track dependencies, we have to get rid of intermediates to
# ensure all the intermediates will be properly re-built.
#
self._built_objects = objects[:]
# Now link the object files together into a "shared object" --
# of course, first we have to figure out all the other things
# that go into the mix.
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
# Detect target language, if not provided
language = ext.language or self.compiler.detect_language(sources)
self.compiler.link_shared_object(
objects, ext_filename,
libraries=self.get_libraries(ext),
library_dirs=ext.library_dirs,
runtime_library_dirs=ext.runtime_library_dirs,
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(ext),
debug=self.debug,
build_temp=self.build_temp,
target_lang=language)
def swig_sources(self, sources, extension):
"""Walk the list of source files in 'sources', looking for SWIG
interface (.i) files. Run SWIG on all that are found, and
return a modified 'sources' list with SWIG source files replaced
by the generated C (or C++) files.
"""
new_sources = []
swig_sources = []
swig_targets = {}
# XXX this drops generated C/C++ files into the source tree, which
# is fine for developers who want to distribute the generated
# source -- but there should be an option to put SWIG output in
# the temp dir.
if self.swig_cpp:
log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")
if self.swig_cpp or ('-c++' in self.swig_opts) or \
('-c++' in extension.swig_opts):
target_ext = '.cpp'
else:
target_ext = '.c'
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == ".i": # SWIG interface file
new_sources.append(base + '_wrap' + target_ext)
swig_sources.append(source)
swig_targets[source] = new_sources[-1]
else:
new_sources.append(source)
if not swig_sources:
return new_sources
swig = self.swig or self.find_swig()
swig_cmd = [swig, "-python"]
swig_cmd.extend(self.swig_opts)
if self.swig_cpp:
swig_cmd.append("-c++")
# Do not override commandline arguments
if not self.swig_opts:
for o in extension.swig_opts:
swig_cmd.append(o)
for source in swig_sources:
target = swig_targets[source]
log.info("swigging %s to %s", source, target)
self.spawn(swig_cmd + ["-o", target, source])
return new_sources
def find_swig(self):
"""Return the name of the SWIG executable. On Unix, this is
just "swig" -- it should be in the PATH. Tries a bit harder on
Windows.
"""
if os.name == "posix":
return "swig"
elif os.name == "nt":
# Look for SWIG in its standard installation directory on
# Windows (or so I presume!). If we find it there, great;
# if not, act like Unix and assume it's in the PATH.
for vers in ("1.3", "1.2", "1.1"):
fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
if os.path.isfile(fn):
return fn
else:
return "swig.exe"
elif os.name == "os2":
# assume swig available in the PATH.
return "swig.exe"
else:
raise DistutilsPlatformError(
"I don't know how to find (much less run) SWIG "
"on platform '%s'" % os.name)
# -- Name generators -----------------------------------------------
# (extension names, filenames, whatever)
def get_ext_fullname(self, ext_name):
if self.package is None:
return ext_name
else:
return self.package + '.' + ext_name
def get_ext_filename(self, ext_name):
r"""Convert the name of an extension (eg. "foo.bar") into the name
of the file from which it will be loaded (eg. "foo/bar.so", or
"foo\bar.pyd").
"""
from distutils.sysconfig import get_config_var
ext_path = ext_name.split('.')
# OS/2 has an 8 character module (extension) limit :-(
if os.name == "os2":
ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8]
# extensions in debug_mode are named 'module_d.pyd' under windows
so_ext = get_config_var('SO')
if os.name == 'nt' and self.debug:
return os.path.join(*ext_path) + '_d' + so_ext
return os.path.join(*ext_path) + so_ext
def get_export_symbols(self, ext):
"""Return the list of symbols that a shared extension has to
export. This either uses 'ext.export_symbols' or, if it's not
provided, "PyInit_" + module_name. Only relevant on Windows, where
the .pyd file (DLL) must export the module "init" function.
"""
initfunc_name = "PyInit_" + ext.name.split('.')[-1]
if initfunc_name not in ext.export_symbols:
ext.export_symbols.append(initfunc_name)
return ext.export_symbols
def get_libraries(self, ext):
"""Return the list of libraries to link against when building a
shared extension. On most platforms, this is just 'ext.libraries';
on Windows and OS/2, we add the Python library (eg. python20.dll).
"""
# The python library is always needed on Windows. For MSVC, this
# is redundant, since the library is mentioned in a pragma in
# pyconfig.h that MSVC groks. The other Windows compilers all seem
# to need it mentioned explicitly, though, so that's what we do.
# Append '_d' to the python import library on debug builds.
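        # e.g. for Python 3.0, sys.hexversion >> 24 is 3 and
        # (sys.hexversion >> 16) & 0xff is 0, giving "python30"
        # (or "python30_d" for debug builds)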
if sys.platform == "win32":
from distutils.msvccompiler import MSVCCompiler
if not isinstance(self.compiler, MSVCCompiler):
template = "python%d%d"
if self.debug:
template = template + '_d'
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
else:
return ext.libraries
elif sys.platform == "os2emx":
# EMX/GCC requires the python library explicitly, and I
# believe VACPP does as well (though not confirmed) - AIM Apr01
template = "python%d%d"
# debug versions of the main DLL aren't supported, at least
# not at this time - AIM Apr01
#if self.debug:
# template = template + '_d'
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
elif sys.platform[:6] == "cygwin":
template = "python%d.%d"
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
elif sys.platform[:6] == "atheos":
from distutils import sysconfig
template = "python%d.%d"
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# Get SHLIBS from Makefile
extra = []
for lib in sysconfig.get_config_var('SHLIBS').split():
if lib.startswith('-l'):
extra.append(lib[2:])
else:
extra.append(lib)
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib, "m"] + extra
elif sys.platform == 'darwin':
# Don't use the default code below
return ext.libraries
else:
from distutils import sysconfig
if sysconfig.get_config_var('Py_ENABLE_SHARED'):
template = "python%d.%d"
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
return ext.libraries + [pythonlib]
else:
return ext.libraries
|
{
"content_hash": "fb13da603d448b9f809854133b360fa3",
"timestamp": "",
"source": "github",
"line_count": 730,
"max_line_length": 86,
"avg_line_length": 43.14794520547945,
"alnum_prop": 0.5621626769953648,
"repo_name": "MalloyPower/parsing-python",
"id": "32b041bd99a8ccc3de52b0744fcc00b99a48ca28",
"size": "31498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.0/Lib/distutils/command/build_ext.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Firefox()
driver.get("http://www.python.org")
assert "Python" in driver.title
elem = driver.find_element_by_name("q")
elem.clear()
elem.send_keys("pycon")
elem.send_keys(Keys.RETURN)
assert "No results found." not in driver.page_source
driver.close()
|
{
"content_hash": "a277db4838b497336185f0cb9ab858a0",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 52,
"avg_line_length": 29.166666666666668,
"alnum_prop": 0.7657142857142857,
"repo_name": "ruslan2k/public-files",
"id": "19523ddcd15ff808743fc85bd5a4a3a4cbf2eee9",
"size": "350",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/05-pinax/mysite/tests/web-test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6738"
},
{
"name": "C++",
"bytes": "231"
},
{
"name": "CSS",
"bytes": "2873"
},
{
"name": "Clojure",
"bytes": "296439"
},
{
"name": "Dockerfile",
"bytes": "1832"
},
{
"name": "Erlang",
"bytes": "39780"
},
{
"name": "HTML",
"bytes": "32945"
},
{
"name": "JavaScript",
"bytes": "1600064"
},
{
"name": "Makefile",
"bytes": "8681"
},
{
"name": "PHP",
"bytes": "606101"
},
{
"name": "Perl",
"bytes": "383"
},
{
"name": "Python",
"bytes": "81625"
},
{
"name": "Ruby",
"bytes": "1979"
},
{
"name": "Shell",
"bytes": "2813"
}
],
"symlink_target": ""
}
|
from __future__ import division
import net.grinder.script.Grinder
package_path = net.grinder.script.Grinder.grinder.getProperties().getProperty(
"grinder.package_path")
import sys
sys.path.append(package_path)
from coverage import coverage
cov = coverage()
cov.start()
import time
import utils
import ingest
import ingestenum
import query
import annotationsingest
import unittest
import random
import math
import grinder
try:
from com.xhaus.jyson import JysonCodec as json
except ImportError:
import json
import pprint
pp = pprint.pprint
sleep_time = -1
get_url = None
post_url = None
post_payload = None
def mock_sleep(cls, x):
global sleep_time
sleep_time = x
class MockReq():
def POST(self, url, payload):
global post_url, post_payload
post_url = url
post_payload = payload
return url, payload
def GET(self, url):
global get_url
get_url = url
return url
class BluefloodTests(unittest.TestCase):
def setUp(self):
self.real_shuffle = random.shuffle
self.real_randint = random.randint
self.real_time = utils.AbstractThread.time
self.real_sleep = utils.AbstractThread.sleep
self.tm = ingest.ThreadManager(net.grinder.script.Grinder.grinder)
req = MockReq()
ingest.IngestThread.request = req
ingestenum.EnumIngestThread.request = req
annotationsingest.AnnotationsIngestThread.request = req
for x in query.QueryThread.query_types:
x.query_request = req
random.shuffle = lambda x: None
random.randint = lambda x, y: 0
utils.AbstractThread.time = lambda x: 1000
utils.AbstractThread.sleep = mock_sleep
test_config = {'report_interval': (1000 * 6),
'num_tenants': 3,
'enum_num_tenants': 4,
'annotations_num_tenants': 3,
'metrics_per_tenant': 7,
'enum_metrics_per_tenant': 2,
'annotations_per_tenant': 2,
'batch_size': 3,
'ingest_concurrency': 2,
'enum_ingest_concurrency': 2,
'query_concurrency': 20,
'annotations_concurrency': 2,
'singleplot_per_interval': 11,
'multiplot_per_interval': 10,
'search_queries_per_interval': 9,
'enum_search_queries_per_interval': 9,
'enum_single_plot_queries_per_interval': 10,
'enum_multiplot_per_interval': 10,
'annotations_queries_per_interval': 8,
'name_fmt': "int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.%d",
'num_nodes': 2}
ingest.default_config.update(test_config)
def test_init_process(self):
# confirm that threadnum 0 is an ingest thread
t1 = self.tm.setup_thread(0)
self.assertEqual(type(t1), ingest.IngestThread)
# confirm that the threadnum after all ingest threads is
# EnumIngestThread
t1 = self.tm.setup_thread(
ingestenum.default_config['enum_ingest_concurrency'])
self.assertEqual(type(t1), ingestenum.EnumIngestThread)
# confirm that the threadnum after all ingest threads is a query thread
t1 = self.tm.setup_thread(ingest.default_config['ingest_concurrency'] +
ingestenum.default_config[
'enum_ingest_concurrency'])
self.assertEqual(type(t1), query.QueryThread)
# confirm that the threadnum after all ingest+query threads is an
# annotations query thread
t1 = self.tm.setup_thread(ingest.default_config['ingest_concurrency'] +
ingestenum.default_config[
'enum_ingest_concurrency'] +
ingest.default_config['query_concurrency'])
self.assertEqual(type(t1), annotationsingest.AnnotationsIngestThread)
# confirm that a threadnum after all valid thread types raises an
# exception
tot_threads = (
ingest.default_config['ingest_concurrency'] +
ingest.default_config['enum_ingest_concurrency'] +
ingest.default_config['query_concurrency'] +
ingest.default_config['annotations_concurrency'])
self.assertRaises(Exception, self.tm.setup_thread, tot_threads)
# confirm that the correct batches of ingest metrics are created for
# worker 0
self.tm.create_all_metrics(0)
# confirm annotationsingest
self.assertEqual(annotationsingest.AnnotationsIngestThread.annotations,
[[0, 0], [0, 1], [1, 0], [1, 1]])
thread = annotationsingest.AnnotationsIngestThread(0)
self.assertEqual(thread.slice, [[0, 0], [0, 1]])
thread = annotationsingest.AnnotationsIngestThread(1)
self.assertEqual(thread.slice, [[1, 0], [1, 1]])
# confirm enum metrics ingest
self.assertEqual(ingestenum.EnumIngestThread.metrics,
[
[[0, 0], [0, 1], [1, 0]],
[[1, 1]]
])
thread = ingestenum.EnumIngestThread(0)
self.assertEqual(thread.slice, [[[0, 0], [0, 1], [1, 0]]])
thread = ingestenum.EnumIngestThread(1)
self.assertEqual(thread.slice, [[[1, 1]]])
# confirm metrics ingest
self.assertEqual(ingest.IngestThread.metrics,
[[[0, 0], [0, 1], [0, 2]],
[[0, 3], [0, 4], [0, 5]],
[[0, 6], [1, 0], [1, 1]],
[[1, 2], [1, 3], [1, 4]],
[[1, 5], [1, 6]]])
# confirm that the correct batch slices are created for individual
# threads
thread = ingest.IngestThread(0)
self.assertEqual(thread.slice,
[[[0, 0], [0, 1], [0, 2]],
[[0, 3], [0, 4], [0, 5]],
[[0, 6], [1, 0], [1, 1]]])
thread = ingest.IngestThread(1)
self.assertEqual(thread.slice,
[[[1, 2], [1, 3], [1, 4]],
[[1, 5], [1, 6]]])
# confirm that the number of queries is correctly distributed across
# each thread in this worker process
num_query_nodes = query.default_config['num_nodes']
single_plot_queries_agent0 = int(math.ceil(
query.default_config['singleplot_per_interval'] / num_query_nodes))
multi_plot_queries_agent0 = int(math.ceil(
query.default_config['multiplot_per_interval'] / num_query_nodes))
search_queries_agent0 = int(math.ceil(
query.default_config[
'search_queries_per_interval'] / num_query_nodes))
enum_search_queries_agent0 = int(math.ceil(
query.default_config[
'enum_search_queries_per_interval'] / num_query_nodes))
enum_single_plot_queries_agent0 = int(math.ceil(
query.default_config[
'enum_single_plot_queries_per_interval'] / num_query_nodes))
enum_multi_plot_queries_agent0 = int(math.ceil(
query.default_config[
'enum_multiplot_per_interval'] / num_query_nodes))
annotation_queries_agent0 = int(math.ceil(
query.default_config[
'annotations_queries_per_interval'] / num_query_nodes))
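        # Worked example (derived from test_config above): with
        # singleplot_per_interval == 11 and num_nodes == 2, agent 0 gets
        # ceil(11 / 2) == 6 single plot queries and agent 1 gets the
        # remaining 5; the same split applies to every query type here.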
self.assertEqual(
query.QueryThread.queries,
([query.SinglePlotQuery] * single_plot_queries_agent0 +
[query.MultiPlotQuery] * multi_plot_queries_agent0 +
[query.SearchQuery] * search_queries_agent0 +
[query.EnumSearchQuery] * enum_search_queries_agent0 +
[query.EnumSinglePlotQuery] * enum_single_plot_queries_agent0 +
[query.AnnotationsQuery] * annotation_queries_agent0) +
[query.EnumMultiPlotQuery] * enum_multi_plot_queries_agent0)
thread = query.QueryThread(0)
self.assertEqual(thread.slice, [query.SinglePlotQuery] * 2)
thread = query.QueryThread(3)
self.assertEqual(thread.slice, [query.MultiPlotQuery] * 2)
thread = query.QueryThread(6)
self.assertEqual(thread.slice, [query.SearchQuery] * 2)
thread = query.QueryThread(9)
self.assertEqual(thread.slice, [query.EnumSearchQuery] * 2)
thread = query.QueryThread(12)
self.assertEqual(thread.slice, [query.EnumSinglePlotQuery] * 2)
thread = query.QueryThread(14)
self.assertEqual(thread.slice, [query.AnnotationsQuery] * 2)
thread = query.QueryThread(16)
self.assertEqual(thread.slice, [query.EnumMultiPlotQuery] * 1)
# confirm that the correct batches of ingest metrics are created for
# worker 1
self.tm.create_all_metrics(1)
self.assertEqual(ingest.IngestThread.metrics,
[[[2, 0], [2, 1], [2, 2]],
[[2, 3], [2, 4], [2, 5]],
[[2, 6]]])
self.assertEqual(annotationsingest.AnnotationsIngestThread.annotations,
[[2, 0], [2, 1]])
thread = ingest.IngestThread(0)
self.assertEqual(thread.slice,
[[[2, 0], [2, 1], [2, 2]],
[[2, 3], [2, 4], [2, 5]]])
thread = ingest.IngestThread(1)
self.assertEqual(thread.slice,
[[[2, 6]]])
# confirm that the correct batches of queries are created for worker 1
single_plot_queries_agent1 = \
query.default_config['singleplot_per_interval'] - \
single_plot_queries_agent0
multi_plot_queries_agent1 = \
query.default_config['multiplot_per_interval'] - \
multi_plot_queries_agent0
search_queries_agent1 = \
query.default_config['search_queries_per_interval'] - \
search_queries_agent0
enum_search_queries_agent1 = \
query.default_config['enum_search_queries_per_interval'] - \
enum_search_queries_agent0
enum_single_plot_queries_agent1 = \
query.default_config['enum_single_plot_queries_per_interval'] - \
enum_single_plot_queries_agent0
annotation_queries_agent1 = \
query.default_config['annotations_queries_per_interval'] - \
annotation_queries_agent0
enum_multi_plot_queries_agent1 = \
query.default_config['enum_multiplot_per_interval'] - \
enum_multi_plot_queries_agent0
self.assertEqual(
query.QueryThread.queries,
([query.SinglePlotQuery] * single_plot_queries_agent1 +
[query.MultiPlotQuery] * multi_plot_queries_agent1 +
[query.SearchQuery] * search_queries_agent1 +
[query.EnumSearchQuery] * enum_search_queries_agent1 +
[query.EnumSinglePlotQuery] * enum_single_plot_queries_agent1 +
[query.AnnotationsQuery] * annotation_queries_agent1) +
[query.EnumMultiPlotQuery] * enum_multi_plot_queries_agent1)
thread = query.QueryThread(0)
self.assertEqual(thread.slice, [query.SinglePlotQuery] * 2)
thread = query.QueryThread(4)
self.assertEqual(thread.slice, [query.MultiPlotQuery] * 2)
thread = query.QueryThread(6)
self.assertEqual(thread.slice, [query.SearchQuery] * 2)
thread = query.QueryThread(8)
self.assertEqual(thread.slice, [query.EnumSearchQuery] * 2)
thread = query.QueryThread(10)
self.assertEqual(thread.slice, [query.EnumSinglePlotQuery] * 2)
thread = query.QueryThread(12)
self.assertEqual(thread.slice, [query.AnnotationsQuery] * 1)
thread = query.QueryThread(16)
self.assertEqual(thread.slice, [query.EnumMultiPlotQuery] * 1)
def test_generate_payload(self):
self.tm.create_all_metrics(1)
thread = ingest.IngestThread(0)
payload = json.loads(
thread.generate_payload(0, [[2, 3], [2, 4], [2, 5]]))
valid_payload = [{u'collectionTime': 0,
u'metricName': u'int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.3',
u'metricValue': 0,
u'tenantId': u'2',
u'ttlInSeconds': 172800,
u'unit': u'days'},
{u'collectionTime': 0,
u'metricName': u'int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.4',
u'metricValue': 0,
u'tenantId': u'2',
u'ttlInSeconds': 172800,
u'unit': u'days'},
{u'collectionTime': 0,
u'metricName': u'int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.5',
u'metricValue': 0,
u'tenantId': u'2',
u'ttlInSeconds': 172800,
u'unit': u'days'}]
self.assertEqual(payload, valid_payload)
def test_generate_enum_payload(self):
self.tm.create_all_metrics(1)
thread = ingestenum.EnumIngestThread(0)
payload = json.loads(thread.generate_payload(1, [[2, 1], [2, 2]]))
valid_payload = [{u'timestamp': 1,
u'tenantId': u'2',
u'enums': [{u'value': u'e_g_1_0',
u'name': utils.generate_enum_metric_name(
1)}]},
{u'timestamp': 1,
u'tenantId': u'2',
u'enums': [{u'value': u'e_g_2_0',
u'name': utils.generate_enum_metric_name(
2)}]}
]
self.assertEqual(payload, valid_payload)
def test_generate_annotations_payload(self):
self.tm.create_all_metrics(1)
thread = annotationsingest.AnnotationsIngestThread(0)
payload = json.loads(thread.generate_payload(0, 3))
valid_payload = {
'what': 'annotation int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.3',
'when': 0,
'tags': 'tag',
'data': 'data'}
self.assertEqual(payload, valid_payload)
def test_annotationsingest_make_request(self):
global sleep_time
thread = annotationsingest.AnnotationsIngestThread(0)
thread.slice = [[2, 0]]
thread.position = 0
thread.finish_time = 10000
valid_payload = {
"what": "annotation int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.0",
"when": 1000, "tags": "tag", "data": "data"}
url, payload = thread.make_request(pp)
# confirm request generates proper URL and payload
self.assertEqual(
url,
'http://qe01.metrics-ingest.api.rackspacecloud.com/v2.0/2/events')
self.assertEqual(eval(payload), valid_payload)
# confirm request increments position if not at end of report interval
self.assertEqual(thread.position, 1)
self.assertEqual(thread.finish_time, 10000)
thread.position = 2
thread.make_request(pp)
# confirm request resets position at end of report interval
self.assertEqual(sleep_time, 9000)
self.assertEqual(thread.position, 1)
self.assertEqual(thread.finish_time, 16000)
def test_ingest_make_request(self):
global sleep_time
thread = ingest.IngestThread(0)
thread.slice = [[[2, 0], [2, 1]]]
thread.position = 0
thread.finish_time = 10000
valid_payload = [
{"collectionTime": 1000, "ttlInSeconds": 172800, "tenantId": "2",
"metricValue": 0, "unit": "days",
"metricName": "int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.0"},
{"collectionTime": 1000, "ttlInSeconds": 172800, "tenantId": "2",
"metricValue": 0, "unit": "days",
"metricName": "int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.1"}]
url, payload = thread.make_request(pp)
# confirm request generates proper URL and payload
self.assertEqual(url,
'http://qe01.metrics-ingest.api.rackspacecloud.com/v2.0/tenantId/ingest/multi')
self.assertEqual(eval(payload), valid_payload)
# confirm request increments position if not at end of report interval
self.assertEqual(thread.position, 1)
self.assertEqual(thread.finish_time, 10000)
thread.position = 2
thread.make_request(pp)
# confirm request resets position at end of report interval
self.assertEqual(sleep_time, 9000)
self.assertEqual(thread.position, 1)
self.assertEqual(thread.finish_time, 16000)
def test_ingest_enum_make_request(self):
global sleep_time
thread = ingestenum.EnumIngestThread(0)
thread.slice = [[[2, 0], [2, 1]]]
thread.position = 0
thread.finish_time = 10000
valid_payload = [{'tenantId': '2', 'timestamp': 1000, 'enums': [
{'value': 'e_g_0_0', 'name': utils.generate_enum_metric_name(0)}]},
{'tenantId': '2', 'timestamp': 1000, 'enums': [
{'value': 'e_g_1_0',
'name': utils.generate_enum_metric_name(1)}]}]
url, payload = thread.make_request(pp)
# confirm request generates proper URL and payload
self.assertEqual(url,
'http://qe01.metrics-ingest.api.rackspacecloud.com/v2.0/tenantId/ingest/aggregated/multi')
self.assertEqual(eval(payload), valid_payload)
# confirm request increments position if not at end of report interval
self.assertEqual(thread.position, 1)
self.assertEqual(thread.finish_time, 10000)
thread.position = 2
thread.make_request(pp)
# confirm request resets position at end of report interval
self.assertEqual(sleep_time, 9000)
self.assertEqual(thread.position, 1)
self.assertEqual(thread.finish_time, 16000)
def test_query_make_request(self):
thread = query.QueryThread(0)
thread.slice = [query.SinglePlotQuery, query.SearchQuery,
query.MultiPlotQuery, query.AnnotationsQuery,
query.EnumSearchQuery, query.EnumSinglePlotQuery,
query.EnumMultiPlotQuery]
thread.position = 0
thread.make_request(pp)
self.assertEqual(get_url,
"http://qe01.metrics.api.rackspacecloud.com/v2.0/0/views/int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.0?from=-86399000&to=1000&resolution=FULL")
random.randint = lambda x, y: 10
thread.make_request(pp)
self.assertEqual(get_url,
"http://qe01.metrics.api.rackspacecloud.com/v2.0/10/metrics/search?query=int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.*")
random.randint = lambda x, y: 20
thread.make_request(pp)
self.assertEqual(post_url,
"http://qe01.metrics.api.rackspacecloud.com/v2.0/20/views?from=-86399000&to=1000&resolution=FULL")
self.assertEqual(eval(post_payload), [
"int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.0",
"int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.1",
"int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.2",
"int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.3",
"int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.4",
"int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.5",
"int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.6",
"int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.7",
"int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.8",
"int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.9"])
random.randint = lambda x, y: 30
thread.make_request(pp)
self.assertEqual(get_url,
"http://qe01.metrics.api.rackspacecloud.com/v2.0/30/events/getEvents?from=-86399000&until=1000")
random.randint = lambda x, y: 40
thread.make_request(pp)
self.assertEqual(get_url,
"http://qe01.metrics.api.rackspacecloud.com/v2.0/40/metrics/search?query=enum_grinder_int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.*&include_enum_values=true")
random.randint = lambda x, y: 50
thread.make_request(pp)
self.assertEqual(get_url,
"http://qe01.metrics.api.rackspacecloud.com/v2.0/50/views/enum_grinder_int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.50?from=-86399000&to=1000&resolution=FULL")
random.randint = lambda x, y: 4
thread.make_request(pp)
self.assertEqual(post_url,
"http://qe01.metrics.api.rackspacecloud.com/v2.0/4/views?from=-86399000&to=1000&resolution=FULL")
self.assertEqual(eval(post_payload), [
"enum_grinder_int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.0",
"enum_grinder_int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.1",
"enum_grinder_int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.2",
"enum_grinder_int.abcdefg.hijklmnop.qrstuvw.xyz.ABCDEFG.HIJKLMNOP.QRSTUVW.XYZ.abcdefg.hijklmnop.qrstuvw.xyz.met.3"])
def tearDown(self):
random.shuffle = self.real_shuffle
random.randint = self.real_randint
utils.AbstractThread.time = self.real_time
utils.AbstractThread.sleep = self.real_sleep
# if __name__ == '__main__':
unittest.TextTestRunner().run(
unittest.TestLoader().loadTestsFromTestCase(BluefloodTests))
cov.stop()
cov.save()
class TestRunner:
def __init__(self):
pass
def __call__(self):
pass
|
{
"content_hash": "b3d4479260b551b15b7da61fc7118989",
"timestamp": "",
"source": "github",
"line_count": 523,
"max_line_length": 237,
"avg_line_length": 44.959847036328874,
"alnum_prop": 0.5921578634005273,
"repo_name": "VinnyQ/blueflood",
"id": "b06876c815f9aeafe39badcf80cdc2bddcca64f3",
"size": "23633",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/grinder/scripts/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "193"
},
{
"name": "Java",
"bytes": "2626579"
},
{
"name": "JavaScript",
"bytes": "16131"
},
{
"name": "Python",
"bytes": "91395"
},
{
"name": "Ruby",
"bytes": "7771"
},
{
"name": "Shell",
"bytes": "30730"
}
],
"symlink_target": ""
}
|
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.5.1-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class NullSCM(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'_class': 'str'
}
attribute_map = {
'_class': '_class'
}
def __init__(self, _class=None, local_vars_configuration=None): # noqa: E501
"""NullSCM - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self.__class = None
self.discriminator = None
if _class is not None:
self._class = _class
@property
def _class(self):
"""Gets the _class of this NullSCM. # noqa: E501
:return: The _class of this NullSCM. # noqa: E501
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this NullSCM.
:param _class: The _class of this NullSCM. # noqa: E501
:type _class: str
"""
self.__class = _class
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NullSCM):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, NullSCM):
return True
return self.to_dict() != other.to_dict()
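# Usage sketch (hedged; not part of the generated module). Exercises the
# helpers defined above on a hypothetical instance:
if __name__ == "__main__":
    scm = NullSCM(_class="hudson.scm.NullSCM")
    print(scm.to_dict())  # {'_class': 'hudson.scm.NullSCM'}
    print(scm == NullSCM(_class="hudson.scm.NullSCM"))  # True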
|
{
"content_hash": "40049bfa49bc85f0a82e0328d8912410",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 85,
"avg_line_length": 27.723076923076924,
"alnum_prop": 0.5477247502774695,
"repo_name": "cliffano/swaggy-jenkins",
"id": "6feaf46d4ab0bdb114ee593e7b5c13c7b99979db",
"size": "3621",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "clients/python-legacy/generated/openapi_client/models/null_scm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "569823"
},
{
"name": "Apex",
"bytes": "741346"
},
{
"name": "Batchfile",
"bytes": "14792"
},
{
"name": "C",
"bytes": "971274"
},
{
"name": "C#",
"bytes": "5131336"
},
{
"name": "C++",
"bytes": "7799032"
},
{
"name": "CMake",
"bytes": "20609"
},
{
"name": "CSS",
"bytes": "4873"
},
{
"name": "Clojure",
"bytes": "129018"
},
{
"name": "Crystal",
"bytes": "864941"
},
{
"name": "Dart",
"bytes": "876777"
},
{
"name": "Dockerfile",
"bytes": "7385"
},
{
"name": "Eiffel",
"bytes": "424642"
},
{
"name": "Elixir",
"bytes": "139252"
},
{
"name": "Elm",
"bytes": "187067"
},
{
"name": "Emacs Lisp",
"bytes": "191"
},
{
"name": "Erlang",
"bytes": "373074"
},
{
"name": "F#",
"bytes": "556012"
},
{
"name": "Gherkin",
"bytes": "951"
},
{
"name": "Go",
"bytes": "345227"
},
{
"name": "Groovy",
"bytes": "89524"
},
{
"name": "HTML",
"bytes": "2367424"
},
{
"name": "Haskell",
"bytes": "680841"
},
{
"name": "Java",
"bytes": "12164874"
},
{
"name": "JavaScript",
"bytes": "1959006"
},
{
"name": "Kotlin",
"bytes": "1280953"
},
{
"name": "Lua",
"bytes": "322316"
},
{
"name": "Makefile",
"bytes": "11882"
},
{
"name": "Nim",
"bytes": "65818"
},
{
"name": "OCaml",
"bytes": "94665"
},
{
"name": "Objective-C",
"bytes": "464903"
},
{
"name": "PHP",
"bytes": "4383673"
},
{
"name": "Perl",
"bytes": "743304"
},
{
"name": "PowerShell",
"bytes": "678274"
},
{
"name": "Python",
"bytes": "5529523"
},
{
"name": "QMake",
"bytes": "6915"
},
{
"name": "R",
"bytes": "840841"
},
{
"name": "Raku",
"bytes": "10945"
},
{
"name": "Ruby",
"bytes": "328360"
},
{
"name": "Rust",
"bytes": "1735375"
},
{
"name": "Scala",
"bytes": "1387368"
},
{
"name": "Shell",
"bytes": "407167"
},
{
"name": "Swift",
"bytes": "342562"
},
{
"name": "TypeScript",
"bytes": "3060093"
}
],
"symlink_target": ""
}
|
"""
WSGI config for django_shopfront project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_shopfront.settings")
application = get_wsgi_application()
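# Deployment sketch (hypothetical command, not from the original file): any
# WSGI server imports the ``application`` callable above, for example:
#   gunicorn django_shopfront.wsgi:application --bind 0.0.0.0:8000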
|
{
"content_hash": "15887500e9b941a6c61038c4af76f6a5",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.625,
"alnum_prop": 0.775609756097561,
"repo_name": "rapilabs/django-shopfront",
"id": "8d1f3b5c498741f67f58f5c9ad90d4de24a8afc6",
"size": "410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/django_shopfront/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2536"
},
{
"name": "HTML",
"bytes": "591"
},
{
"name": "JavaScript",
"bytes": "35941"
},
{
"name": "Python",
"bytes": "31111"
}
],
"symlink_target": ""
}
|
"""
various utilities
"""
import os
import logging
import urllib
from urllib.request import urlopen
from email.utils import formataddr
from email.mime.text import MIMEText
from configparser import ConfigParser
__all__ = ["Configuration", "get_page", "setup_logging"]
class Borg(object):
""" A class which "shares state" among its instances. """
__shared_state = {}
def __init__(self):
self.__dict__ = self.__shared_state
class Configuration(Borg):
"""
Global configuration class.
This is a subclass of Borg. It can only be initialized only once.
"""
def initialize(self, config_file):
""" Should only be called once. """
parser = ConfigParser()
parser.read(os.path.realpath(config_file))
self.username = parser.get("email", "username")
self.password = parser.get("email", "password")
self.recipients = [line for line in parser.get("email", "recipients").splitlines() if line]
self.input_file = parser.get("general", "input_file")
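    # Usage sketch (hedged; "config.ini" is a hypothetical path): because of
    # the Borg base class, every Configuration() instance sees the state set
    # by the single initialize() call:
    #   Configuration().initialize("config.ini")
    #   Configuration().username  # readable from any later instance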
def get_page(url):
""" Return the response of the given `url` as a string. """
response = urlopen(url)
response_code = response.getcode()
text = response.read().decode('utf8')
return text
def setup_logging():
""" setup logging. """
# initialize the root logger
logger = logging.getLogger()
    logger.setLevel(0)  # NOTSET: the root logger passes every record through and lets the handlers filter
# Create the formatter
formatter = logging.Formatter(
fmt='%(asctime)s; %(levelname)-8s; %(name)-15s; %(message)s',
datefmt="%Y-%m-%d %H:%M:%S"
)
# create console handler
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
    console_handler.setLevel(logging.INFO)
# add the handlers to logger
logger.addHandler(console_handler)
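# Demonstration sketch (not part of the original module): Borg instances
# share a single attribute namespace, which is what makes Configuration
# behave like a singleton without being one.
if __name__ == "__main__":
    a, b = Borg(), Borg()
    a.value = 42
    assert b.value == 42  # state flows through the shared __dict__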
|
{
"content_hash": "e9847afb5188d16f5fca1a325747b876",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 99,
"avg_line_length": 27.257142857142856,
"alnum_prop": 0.6546121593291404,
"repo_name": "pmav99/check_prices",
"id": "3becfd1e7c4c0982fe87ee387a9a447c0098c34d",
"size": "1955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6526"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from mkt.webapps.serializers import SimpleAppSerializer, SimpleESAppSerializer
from mkt.websites.serializers import ESWebsiteSerializer, WebsiteSerializer
class TVAppSerializer(SimpleAppSerializer):
tv_featured = serializers.IntegerField()
class Meta(SimpleAppSerializer.Meta):
fields = ['author', 'categories',
'content_ratings', 'current_version', 'description',
'file_size', 'homepage', 'icons', 'id',
'last_updated', 'manifest_url', 'name', 'privacy_policy',
'promo_imgs', 'public_stats', 'release_notes',
'ratings', 'slug', 'status', 'support_email', 'support_url',
'tags', 'tv_featured', 'user']
exclude = []
def get_icons(self, obj):
return {336: obj.get_icon_url(336), 128: obj.get_icon_url(128)}
class TVESAppSerializer(SimpleESAppSerializer):
tv_featured = serializers.IntegerField()
class Meta(SimpleESAppSerializer.Meta):
fields = TVAppSerializer.Meta.fields
exclude = TVAppSerializer.Meta.exclude
def get_user_info(self, app):
# TV search should always be anonymous for extra-cacheability.
return None
def get_icons(self, obj):
return {336: obj.get_icon_url(336), 128: obj.get_icon_url(128)}
class TVWebsiteSerializer(WebsiteSerializer):
tv_featured = serializers.IntegerField()
class Meta(WebsiteSerializer.Meta):
fields = ['categories', 'description', 'developer_name', 'icons', 'id',
'keywords', 'name', 'promo_imgs', 'short_name',
'tv_featured', 'tv_url', 'url']
def get_icons(self, obj):
return {336: obj.get_icon_url(336), 128: obj.get_icon_url(128)}
class TVESWebsiteSerializer(ESWebsiteSerializer):
tv_featured = serializers.IntegerField()
class Meta(ESWebsiteSerializer.Meta):
fields = TVWebsiteSerializer.Meta.fields
def get_icons(self, obj):
return {336: obj.get_icon_url(336), 128: obj.get_icon_url(128)}
|
{
"content_hash": "d2a0f9003624bb05854c7b93bc195015",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 36.45614035087719,
"alnum_prop": 0.6467757459095284,
"repo_name": "diox/zamboni",
"id": "2e11ffe79d2e05651e77fb7ff26ea4e0e9381cec",
"size": "2078",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "mkt/tvplace/serializers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "354315"
},
{
"name": "HTML",
"bytes": "2333064"
},
{
"name": "JavaScript",
"bytes": "529996"
},
{
"name": "Makefile",
"bytes": "4281"
},
{
"name": "Python",
"bytes": "4535722"
},
{
"name": "Shell",
"bytes": "11147"
},
{
"name": "Smarty",
"bytes": "1159"
}
],
"symlink_target": ""
}
|
import smtplib
from email.mime.text import MIMEText
from config import GMAIL_CONFIG
def send_mail(to_list,sub,content):
'''
    to_list: list of recipient addresses
    sub: subject line
    content: message body
    send_mail(["zhkzyth@gmail.com"], "sub", "content")
'''
mail_host = GMAIL_CONFIG['mail_host']
mail_port = GMAIL_CONFIG['mail_port']
mail_user = GMAIL_CONFIG['mail_user']
mail_pass = GMAIL_CONFIG['mail_pass']
mail_postfix = GMAIL_CONFIG['mail_postfix']
me=mail_user+"<"+mail_user+"@"+mail_postfix+">"
msg = MIMEText(content)
msg['Subject'] = sub
msg['From'] = me
msg['To'] = ";".join(to_list)
try:
s = smtplib.SMTP()
s.connect(mail_host,mail_port)
s.starttls()
s.login(mail_user,mail_pass)
s.sendmail(me, to_list, msg.as_string())
s.close()
return True
    except Exception as e:
        print(str(e))
return False
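# Usage sketch (hypothetical recipient; GMAIL_CONFIG must supply the
# mail_host, mail_port, mail_user, mail_pass and mail_postfix keys read above):
#   ok = send_mail(["alice@example.com"], "crawler alert", "queue is empty")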
|
{
"content_hash": "c31b0b27c2b4a59b64a503073670c9af",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 51,
"avg_line_length": 26.323529411764707,
"alnum_prop": 0.5910614525139665,
"repo_name": "zhkzyth/a-super-fast-crawler",
"id": "e89c85c69693f27ee33f482edc8a0f001b591a3e",
"size": "949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48239"
}
],
"symlink_target": ""
}
|
"""A converter from a tf1 ALBERT encoder checkpoint to a tf2 encoder checkpoint.
The conversion will yield an object-oriented checkpoint that can be used
to restore a AlbertTransformerEncoder object.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import tensorflow as tf
from official.modeling import activations
from official.nlp.albert import configs
from official.nlp.bert import tf1_checkpoint_converter_lib
from official.nlp.modeling import networks
FLAGS = flags.FLAGS
flags.DEFINE_string("albert_config_file", None,
"Albert configuration file to define core bert layers.")
flags.DEFINE_string(
"checkpoint_to_convert", None,
"Initial checkpoint from a pretrained BERT model core (that is, only the "
"BertModel, with no task heads.)")
flags.DEFINE_string("converted_checkpoint_path", None,
"Name for the created object-based V2 checkpoint.")
ALBERT_NAME_REPLACEMENTS = (
("bert/encoder/", ""),
("bert/", ""),
("embeddings/word_embeddings", "word_embeddings/embeddings"),
("embeddings/position_embeddings", "position_embedding/embeddings"),
("embeddings/token_type_embeddings", "type_embeddings/embeddings"),
("embeddings/LayerNorm", "embeddings/layer_norm"),
("embedding_hidden_mapping_in", "embedding_projection"),
("group_0/inner_group_0/", ""),
("attention_1/self", "self_attention"),
("attention_1/output/dense", "self_attention/attention_output"),
("LayerNorm/", "self_attention_layer_norm/"),
("ffn_1/intermediate/dense", "intermediate"),
("ffn_1/intermediate/output/dense", "output"),
("LayerNorm_1/", "output_layer_norm/"),
("pooler/dense", "pooler_transform"),
("cls/predictions/output_bias", "cls/predictions/output_bias/bias"),
("cls/seq_relationship/output_bias", "predictions/transform/logits/bias"),
("cls/seq_relationship/output_weights",
"predictions/transform/logits/kernel"),
)
def _create_albert_model(cfg):
"""Creates a BERT keras core model from BERT configuration.
Args:
cfg: A `BertConfig` to create the core model.
Returns:
A keras model.
"""
albert_encoder = networks.AlbertTransformerEncoder(
vocab_size=cfg.vocab_size,
hidden_size=cfg.hidden_size,
embedding_width=cfg.embedding_size,
num_layers=cfg.num_hidden_layers,
num_attention_heads=cfg.num_attention_heads,
intermediate_size=cfg.intermediate_size,
activation=activations.gelu,
dropout_rate=cfg.hidden_dropout_prob,
attention_dropout_rate=cfg.attention_probs_dropout_prob,
sequence_length=cfg.max_position_embeddings,
type_vocab_size=cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=cfg.initializer_range))
return albert_encoder
def convert_checkpoint(bert_config, output_path, v1_checkpoint):
"""Converts a V1 checkpoint into an OO V2 checkpoint."""
output_dir, _ = os.path.split(output_path)
# Create a temporary V1 name-converted checkpoint in the output directory.
temporary_checkpoint_dir = os.path.join(output_dir, "temp_v1")
temporary_checkpoint = os.path.join(temporary_checkpoint_dir, "ckpt")
tf1_checkpoint_converter_lib.convert(
checkpoint_from_path=v1_checkpoint,
checkpoint_to_path=temporary_checkpoint,
num_heads=bert_config.num_attention_heads,
name_replacements=ALBERT_NAME_REPLACEMENTS,
permutations=tf1_checkpoint_converter_lib.BERT_V2_PERMUTATIONS,
exclude_patterns=["adam", "Adam"])
# Create a V2 checkpoint from the temporary checkpoint.
model = _create_albert_model(bert_config)
tf1_checkpoint_converter_lib.create_v2_checkpoint(model, temporary_checkpoint,
output_path)
# Clean up the temporary checkpoint, if it exists.
try:
tf.io.gfile.rmtree(temporary_checkpoint_dir)
except tf.errors.OpError:
# If it doesn't exist, we don't need to clean it up; continue.
pass
def main(_):
output_path = FLAGS.converted_checkpoint_path
v1_checkpoint = FLAGS.checkpoint_to_convert
albert_config = configs.AlbertConfig.from_json_file(FLAGS.albert_config_file)
convert_checkpoint(albert_config, output_path, v1_checkpoint)
if __name__ == "__main__":
app.run(main)
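# Example invocation (hedged; all paths are hypothetical):
#   python tf2_albert_encoder_checkpoint_converter.py \
#     --albert_config_file=/tmp/albert_config.json \
#     --checkpoint_to_convert=/tmp/albert_v1/model.ckpt \
#     --converted_checkpoint_path=/tmp/albert_v2/ckpt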
|
{
"content_hash": "506ccf56de154ef3ba1bad3e8de6eecf",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 80,
"avg_line_length": 37.26271186440678,
"alnum_prop": 0.7088924266545372,
"repo_name": "tombstone/models",
"id": "402bc1445bed575362598d09212d14d03b629179",
"size": "5086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "official/nlp/albert/tf2_albert_encoder_checkpoint_converter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1365199"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "1858048"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Python",
"bytes": "7241242"
},
{
"name": "Shell",
"bytes": "102270"
},
{
"name": "TypeScript",
"bytes": "6515"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/loot/misc/shared_mt_flatbread.iff"
result.attribute_template_id = -1
result.stfName("item_n","mt_flatbread")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "638f1dcfdc3c9a350ace9529a9c66bec",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 70,
"avg_line_length": 23.076923076923077,
"alnum_prop": 0.69,
"repo_name": "obi-two/Rebelion",
"id": "cbb73781f986ff1c0804f1332df42f1bdbd7516d",
"size": "445",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/loot/misc/shared_mt_flatbread.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""An I/O event loop for non-blocking sockets.
In Tornado 6.0, `.IOLoop` is a wrapper around the `asyncio` event
loop, with a slightly different interface for historical reasons.
Applications can use either the `.IOLoop` interface or the underlying
`asyncio` event loop directly (unless compatibility with older
versions of Tornado is desired, in which case `.IOLoop` must be used).
Typical applications will use a single `IOLoop` object, accessed via
`IOLoop.current` class method. The `IOLoop.start` method (or
equivalently, `asyncio.AbstractEventLoop.run_forever`) should usually
be called at the end of the ``main()`` function. Atypical applications
may use more than one `IOLoop`, such as one `IOLoop` per thread, or
per `unittest` case.
"""
import asyncio
import concurrent.futures
import datetime
import functools
import logging
import numbers
import os
import sys
import time
import math
import random
import warnings
from inspect import isawaitable
from tornado.concurrent import (
Future,
is_future,
chain_future,
future_set_exc_info,
future_add_done_callback,
)
from tornado.log import app_log
from tornado.util import Configurable, TimeoutError, import_object
import typing
from typing import Union, Any, Type, Optional, Callable, TypeVar, Tuple, Awaitable
if typing.TYPE_CHECKING:
from typing import Dict, List # noqa: F401
from typing_extensions import Protocol
else:
Protocol = object
class _Selectable(Protocol):
def fileno(self) -> int:
pass
def close(self) -> None:
pass
_T = TypeVar("_T")
_S = TypeVar("_S", bound=_Selectable)
class IOLoop(Configurable):
"""An I/O event loop.
As of Tornado 6.0, `IOLoop` is a wrapper around the `asyncio` event
loop.
Example usage for a simple TCP server:
.. testcode::
import asyncio
import errno
import functools
import socket
import tornado.ioloop
from tornado.iostream import IOStream
async def handle_connection(connection, address):
stream = IOStream(connection)
message = await stream.read_until_close()
print("message from client:", message.decode().strip())
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except BlockingIOError:
return
connection.setblocking(0)
io_loop = tornado.ioloop.IOLoop.current()
io_loop.spawn_callback(handle_connection, connection, address)
async def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", 8888))
sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
await asyncio.Event().wait()
if __name__ == "__main__":
asyncio.run(main())
.. testoutput::
:hide:
Do not attempt to construct an `IOLoop` directly; this is deprecated
since Tornado 6.2. Instead, initialize the `asyncio` event loop and
use `IOLoop.current()` to access an `IOLoop` wrapper around the
current event loop.
In general, an `IOLoop` cannot survive a fork or be shared across
processes in any way. When multiple processes are being used, each
process should create its own `IOLoop`, which also implies that
any objects which depend on the `IOLoop` (such as
`.AsyncHTTPClient`) must also be created in the child processes.
As a guideline, anything that starts processes (including the
`tornado.process` and `multiprocessing` modules) should do so as
early as possible, ideally the first thing the application does
after loading its configuration in ``main()``.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
.. versionchanged:: 5.0
Uses the `asyncio` event loop by default. The
``IOLoop.configure`` method cannot be used on Python 3 except
to redundantly specify the `asyncio` event loop.
.. deprecated:: 6.2
It is deprecated to create an event loop that is "current" but not
currently running. This means it is deprecated to pass
``make_current=True`` to the ``IOLoop`` constructor, or to create
an ``IOLoop`` while no asyncio event loop is running unless
``make_current=False`` is used.
"""
# These constants were originally based on constants from the epoll module.
NONE = 0
READ = 0x001
WRITE = 0x004
ERROR = 0x018
# In Python 3, _ioloop_for_asyncio maps from asyncio loops to IOLoops.
_ioloop_for_asyncio = dict() # type: Dict[asyncio.AbstractEventLoop, IOLoop]
@classmethod
def configure(
cls, impl: "Union[None, str, Type[Configurable]]", **kwargs: Any
) -> None:
if asyncio is not None:
from tornado.platform.asyncio import BaseAsyncIOLoop
if isinstance(impl, str):
impl = import_object(impl)
if isinstance(impl, type) and not issubclass(impl, BaseAsyncIOLoop):
raise RuntimeError(
"only AsyncIOLoop is allowed when asyncio is available"
)
super(IOLoop, cls).configure(impl, **kwargs)
@staticmethod
def instance() -> "IOLoop":
"""Deprecated alias for `IOLoop.current()`.
.. versionchanged:: 5.0
Previously, this method returned a global singleton
`IOLoop`, in contrast with the per-thread `IOLoop` returned
by `current()`. In nearly all cases the two were the same
(when they differed, it was generally used from non-Tornado
threads to communicate back to the main thread's `IOLoop`).
This distinction is not present in `asyncio`, so in order
to facilitate integration with that package `instance()`
was changed to be an alias to `current()`. Applications
using the cross-thread communications aspect of
`instance()` should instead set their own global variable
to point to the `IOLoop` they want to use.
.. deprecated:: 5.0
"""
return IOLoop.current()
def install(self) -> None:
"""Deprecated alias for `make_current()`.
.. versionchanged:: 5.0
Previously, this method would set this `IOLoop` as the
global singleton used by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`, `install()`
is an alias for `make_current()`.
.. deprecated:: 5.0
"""
self.make_current()
@staticmethod
def clear_instance() -> None:
"""Deprecated alias for `clear_current()`.
.. versionchanged:: 5.0
Previously, this method would clear the `IOLoop` used as
the global singleton by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`,
`clear_instance()` is an alias for `clear_current()`.
.. deprecated:: 5.0
"""
IOLoop.clear_current()
@typing.overload
@staticmethod
def current() -> "IOLoop":
pass
@typing.overload
@staticmethod
def current(instance: bool = True) -> Optional["IOLoop"]: # noqa: F811
pass
@staticmethod
def current(instance: bool = True) -> Optional["IOLoop"]: # noqa: F811
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop` and ``instance`` is true, creates one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
.. versionchanged:: 5.0
On Python 3, control of the current `IOLoop` is delegated
to `asyncio`, with this and other methods as pass-through accessors.
The ``instance`` argument now controls whether an `IOLoop`
is created automatically when there is none, instead of
whether we fall back to `IOLoop.instance()` (which is now
an alias for this method). ``instance=False`` is deprecated,
since even if we do not create an `IOLoop`, this method
may initialize the asyncio loop.
.. deprecated:: 6.2
It is deprecated to call ``IOLoop.current()`` when no `asyncio`
event loop is running.
"""
try:
loop = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
if not instance:
return None
raise
try:
return IOLoop._ioloop_for_asyncio[loop]
except KeyError:
if instance:
from tornado.platform.asyncio import AsyncIOMainLoop
current = AsyncIOMainLoop(make_current=True) # type: Optional[IOLoop]
else:
current = None
return current
def make_current(self) -> None:
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
.. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop`
will automatically become current.
.. versionchanged:: 5.0
This method also sets the current `asyncio` event loop.
.. deprecated:: 6.2
The concept of an event loop that is "current" without
currently running is deprecated in asyncio since Python
3.10. All related functionality in Tornado is also
deprecated. Instead, start the event loop with `asyncio.run`
before interacting with it.
"""
# The asyncio event loops override this method.
raise NotImplementedError()
@staticmethod
def clear_current() -> None:
"""Clears the `IOLoop` for the current thread.
Intended primarily for use by test frameworks in between tests.
.. versionchanged:: 5.0
This method also clears the current `asyncio` event loop.
.. deprecated:: 6.2
"""
warnings.warn("clear_current is deprecated", DeprecationWarning)
old = IOLoop.current(instance=False)
if old is not None:
old._clear_current_hook()
if asyncio is None:
IOLoop._current.instance = None
def _clear_current_hook(self) -> None:
"""Instance method called when an IOLoop ceases to be current.
May be overridden by subclasses as a counterpart to make_current.
"""
pass
@classmethod
def configurable_base(cls) -> Type[Configurable]:
return IOLoop
@classmethod
def configurable_default(cls) -> Type[Configurable]:
from tornado.platform.asyncio import AsyncIOLoop
return AsyncIOLoop
def initialize(self, make_current: Optional[bool] = None) -> None:
if make_current is None:
if IOLoop.current(instance=False) is None:
self.make_current()
elif make_current:
current = IOLoop.current(instance=False)
# AsyncIO loops can already be current by this point.
if current is not None and current is not self:
raise RuntimeError("current IOLoop already exists")
self.make_current()
def close(self, all_fds: bool = False) -> None:
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
           ``close`` method called when ``all_fds`` is true.
"""
raise NotImplementedError()
@typing.overload
def add_handler(
self, fd: int, handler: Callable[[int, int], None], events: int
) -> None:
pass
@typing.overload # noqa: F811
def add_handler(
self, fd: _S, handler: Callable[[_S, int], None], events: int
) -> None:
pass
def add_handler( # noqa: F811
self, fd: Union[int, _Selectable], handler: Callable[..., None], events: int
) -> None:
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` and ``close()`` method.
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def update_handler(self, fd: Union[int, _Selectable], events: int) -> None:
"""Changes the events we listen for ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd: Union[int, _Selectable]) -> None:
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def start(self) -> None:
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self) -> None:
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any(
[
logging.getLogger().handlers,
logging.getLogger("tornado").handlers,
logging.getLogger("tornado.application").handlers,
]
):
logging.basicConfig()
def stop(self) -> None:
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func: Callable, timeout: Optional[float] = None) -> Any:
"""Starts the `IOLoop`, runs the given function, and stops the loop.
The function must return either an awaitable object or
``None``. If the function returns an awaitable object, the
`IOLoop` will run until the awaitable is resolved (and
`run_sync()` will return the awaitable's result). If it raises
an exception, the `IOLoop` will stop and the exception will be
re-raised to the caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
        an `asyncio.TimeoutError` is raised.
This method is useful to allow asynchronous calls in a
``main()`` function::
async def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
.. versionchanged:: 4.3
Returning a non-``None``, non-awaitable value is now an error.
.. versionchanged:: 5.0
If a timeout occurs, the ``func`` coroutine will be cancelled.
.. versionchanged:: 6.2
``tornado.util.TimeoutError`` is now an alias to ``asyncio.TimeoutError``.
"""
future_cell = [None] # type: List[Optional[Future]]
def run() -> None:
try:
result = func()
if result is not None:
from tornado.gen import convert_yielded
result = convert_yielded(result)
except Exception:
fut = Future() # type: Future[Any]
future_cell[0] = fut
future_set_exc_info(fut, sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
fut = Future()
future_cell[0] = fut
fut.set_result(result)
assert future_cell[0] is not None
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
def timeout_callback() -> None:
# If we can cancel the future, do so and wait on it. If not,
# Just stop the loop and return with the task still pending.
# (If we neither cancel nor wait for the task, a warning
# will be logged).
assert future_cell[0] is not None
if not future_cell[0].cancel():
self.stop()
timeout_handle = self.add_timeout(self.time() + timeout, timeout_callback)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
assert future_cell[0] is not None
if future_cell[0].cancelled() or not future_cell[0].done():
raise TimeoutError("Operation timed out after %s seconds" % timeout)
return future_cell[0].result()
def time(self) -> float:
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
Historically, the IOLoop could be customized to use e.g.
`time.monotonic` instead of `time.time`, but this is not
currently supported and so this method is equivalent to
`time.time`.
"""
return time.time()
def add_timeout(
self,
deadline: Union[float, datetime.timedelta],
callback: Callable,
*args: Any,
**kwargs: Any
) -> object:
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs)
elif isinstance(deadline, datetime.timedelta):
return self.call_at(
self.time() + deadline.total_seconds(), callback, *args, **kwargs
)
else:
raise TypeError("Unsupported deadline %r" % deadline)
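    # Usage sketch (hedged, not in the original source): the two deadline
    # forms dispatched above are equivalent ways to fire ``cb`` in 5 seconds:
    #   io_loop.add_timeout(io_loop.time() + 5, cb)             # absolute
    #   io_loop.add_timeout(datetime.timedelta(seconds=5), cb)  # relative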
def call_later(
self, delay: float, callback: Callable, *args: Any, **kwargs: Any
) -> object:
"""Runs the ``callback`` after ``delay`` seconds have passed.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(
self, when: float, callback: Callable, *args: Any, **kwargs: Any
) -> object:
"""Runs the ``callback`` at the absolute time designated by ``when``.
``when`` must be a number using the same reference point as
`IOLoop.time`.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.add_timeout(when, callback, *args, **kwargs)
def remove_timeout(self, timeout: object) -> None:
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback: Callable, *args: Any, **kwargs: Any) -> None:
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(
self, callback: Callable, *args: Any, **kwargs: Any
) -> None:
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
"""
raise NotImplementedError()
def spawn_callback(self, callback: Callable, *args: Any, **kwargs: Any) -> None:
"""Calls the given callback on the next IOLoop iteration.
As of Tornado 6.0, this method is equivalent to `add_callback`.
.. versionadded:: 4.0
"""
self.add_callback(callback, *args, **kwargs)
def add_future(
self,
future: "Union[Future[_T], concurrent.futures.Future[_T]]",
callback: Callable[["Future[_T]"], None],
) -> None:
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
This method only accepts `.Future` objects and not other
awaitables (unlike most of Tornado where the two are
interchangeable).
"""
if isinstance(future, Future):
# Note that we specifically do not want the inline behavior of
# tornado.concurrent.future_add_done_callback. We always want
# this callback scheduled on the next IOLoop iteration (which
# asyncio.Future always does).
#
# Wrap the callback in self._run_callback so we control
# the error logging (i.e. it goes to tornado.log.app_log
# instead of asyncio's log).
future.add_done_callback(
lambda f: self._run_callback(functools.partial(callback, future))
)
else:
assert is_future(future)
# For concurrent futures, we use self.add_callback, so
# it's fine if future_add_done_callback inlines that call.
future_add_done_callback(
future, lambda f: self.add_callback(callback, future)
)
def run_in_executor(
self,
executor: Optional[concurrent.futures.Executor],
func: Callable[..., _T],
*args: Any
) -> Awaitable[_T]:
"""Runs a function in a ``concurrent.futures.Executor``. If
``executor`` is ``None``, the IO loop's default executor will be used.
Use `functools.partial` to pass keyword arguments to ``func``.
.. versionadded:: 5.0
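        A minimal sketch (illustrative; ``parse_file`` is a hypothetical
        blocking function)::
            async def handler():
                loop = IOLoop.current()
                data = await loop.run_in_executor(None, parse_file, path)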
"""
if executor is None:
if not hasattr(self, "_executor"):
from tornado.process import cpu_count
self._executor = concurrent.futures.ThreadPoolExecutor(
max_workers=(cpu_count() * 5)
) # type: concurrent.futures.Executor
executor = self._executor
c_future = executor.submit(func, *args)
# Concurrent Futures are not usable with await. Wrap this in a
# Tornado Future instead, using self.add_future for thread-safety.
t_future = Future() # type: Future[_T]
self.add_future(c_future, lambda f: chain_future(f, t_future))
return t_future
def set_default_executor(self, executor: concurrent.futures.Executor) -> None:
"""Sets the default executor to use with :meth:`run_in_executor`.
.. versionadded:: 5.0
"""
self._executor = executor
def _run_callback(self, callback: Callable[[], Any]) -> None:
"""Runs a callback with error handling.
.. versionchanged:: 6.0
CancelledErrors are no longer logged.
"""
try:
ret = callback()
if ret is not None:
from tornado import gen
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
try:
ret = gen.convert_yielded(ret)
except gen.BadYieldError:
# It's not unusual for add_callback to be used with
# methods returning a non-None and non-yieldable
# result, which should just be ignored.
pass
else:
self.add_future(ret, self._discard_future_result)
except asyncio.CancelledError:
pass
except Exception:
app_log.error("Exception in callback %r", callback, exc_info=True)
def _discard_future_result(self, future: Future) -> None:
"""Avoid unhandled-exception warnings from spawned coroutines."""
future.result()
def split_fd(
self, fd: Union[int, _Selectable]
) -> Tuple[int, Union[int, _Selectable]]:
# """Returns an (fd, obj) pair from an ``fd`` parameter.
# We accept both raw file descriptors and file-like objects as
# input to `add_handler` and related methods. When a file-like
# object is passed, we must retain the object itself so we can
# close it correctly when the `IOLoop` shuts down, but the
# poller interfaces favor file descriptors (they will accept
# file-like objects and call ``fileno()`` for you, but they
# always return the descriptor itself).
# This method is provided for use by `IOLoop` subclasses and should
# not generally be used by application code.
# .. versionadded:: 4.0
# """
if isinstance(fd, int):
return fd, fd
return fd.fileno(), fd
def close_fd(self, fd: Union[int, _Selectable]) -> None:
# """Utility method to close an ``fd``.
# If ``fd`` is a file-like object, we close it directly; otherwise
# we use `os.close`.
# This method is provided for use by `IOLoop` subclasses (in
# implementations of ``IOLoop.close(all_fds=True)`` and should
# not generally be used by application code.
# .. versionadded:: 4.0
# """
try:
if isinstance(fd, int):
os.close(fd)
else:
fd.close()
except OSError:
pass
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ["deadline", "callback", "tdeadline"]
def __init__(
self, deadline: float, callback: Callable[[], None], io_loop: IOLoop
) -> None:
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tdeadline = (
deadline,
next(io_loop._timeout_counter),
) # type: Tuple[float, int]
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other: "_Timeout") -> bool:
return self.tdeadline < other.tdeadline
def __le__(self, other: "_Timeout") -> bool:
return self.tdeadline <= other.tdeadline
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds when
``callback_time`` is a float. Note that the timeout is given in
milliseconds, while most other time-related functions in Tornado use
seconds. ``callback_time`` may alternatively be given as a
`datetime.timedelta` object.
If ``jitter`` is specified, each callback time will be randomly selected
within a window of ``jitter * callback_time`` milliseconds.
Jitter can be used to reduce alignment of events with similar periods.
A jitter of 0.1 means allowing a 10% variation in callback time.
The window is centered on ``callback_time`` so the total number of calls
within a given interval should not be significantly affected by adding
jitter.
If the callback runs for longer than ``callback_time`` milliseconds,
subsequent invocations will be skipped to get back on schedule.
`start` must be called after the `PeriodicCallback` is created.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. versionchanged:: 5.1
The ``jitter`` argument is added.
.. versionchanged:: 6.2
If the ``callback`` argument is a coroutine, and a callback runs for
longer than ``callback_time``, subsequent invocations will be skipped.
Previously this was only true for regular functions, not coroutines,
which were "fire-and-forget" for `PeriodicCallback`.
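    A minimal usage sketch (illustrative)::
        def tick():
            print("tick")
        pc = PeriodicCallback(tick, callback_time=1000, jitter=0.1)
        pc.start()  # fires roughly every second, with +/-10% jitter
        ...
        pc.stop()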
"""
def __init__(
self,
callback: Callable[[], Optional[Awaitable]],
callback_time: Union[datetime.timedelta, float],
jitter: float = 0,
) -> None:
self.callback = callback
if isinstance(callback_time, datetime.timedelta):
self.callback_time = callback_time / datetime.timedelta(milliseconds=1)
else:
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.jitter = jitter
self._running = False
self._timeout = None # type: object
def start(self) -> None:
"""Starts the timer."""
        # Looking up the IOLoop here allows us to first instantiate the
        # PeriodicCallback in another thread, then start it using
        # IOLoop.add_callback().
self.io_loop = IOLoop.current()
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self) -> None:
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def is_running(self) -> bool:
"""Returns ``True`` if this `.PeriodicCallback` has been started.
.. versionadded:: 4.1
"""
return self._running
async def _run(self) -> None:
if not self._running:
return
try:
val = self.callback()
if val is not None and isawaitable(val):
await val
except Exception:
app_log.error("Exception in callback %r", self.callback, exc_info=True)
finally:
self._schedule_next()
def _schedule_next(self) -> None:
if self._running:
self._update_next(self.io_loop.time())
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
def _update_next(self, current_time: float) -> None:
callback_time_sec = self.callback_time / 1000.0
if self.jitter:
# apply jitter fraction
callback_time_sec *= 1 + (self.jitter * (random.random() - 0.5))
if self._next_timeout <= current_time:
# The period should be measured from the start of one call
# to the start of the next. If one call takes too long,
# skip cycles to get back to a multiple of the original
# schedule.
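            # Worked example (illustrative): with callback_time=1000 ms
            # (callback_time_sec == 1.0) and a callback that overran so that
            # current_time - self._next_timeout == 2.5, we advance by
            # (floor(2.5 / 1.0) + 1) * 1.0 == 3.0 seconds, landing back on
            # the original one-second grid.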
self._next_timeout += (
math.floor((current_time - self._next_timeout) / callback_time_sec) + 1
) * callback_time_sec
else:
# If the clock moved backwards, ensure we advance the next
# timeout instead of recomputing the same value again.
# This may result in long gaps between callbacks if the
# clock jumps backwards by a lot, but the far more common
# scenario is a small NTP adjustment that should just be
# ignored.
#
# Note that on some systems if time.time() runs slower
# than time.monotonic() (most common on windows), we
# effectively experience a small backwards time jump on
# every iteration because PeriodicCallback uses
# time.time() while asyncio schedules callbacks using
# time.monotonic().
# https://github.com/tornadoweb/tornado/issues/2333
self._next_timeout += callback_time_sec
|
{
"content_hash": "c8e21d13c24c676be5d367713096c23b",
"timestamp": "",
"source": "github",
"line_count": 969,
"max_line_length": 88,
"avg_line_length": 37.60061919504644,
"alnum_prop": 0.6072732262933992,
"repo_name": "lilydjwg/tornado",
"id": "cf5a85c9f2a28968844e1bad97660f774fbda20b",
"size": "37010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tornado/ioloop.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1524"
},
{
"name": "Cython",
"bytes": "780"
},
{
"name": "HTML",
"bytes": "25"
},
{
"name": "Python",
"bytes": "1551877"
},
{
"name": "Shell",
"bytes": "4070"
}
],
"symlink_target": ""
}
|
'''
Script to plot evolution of parameters in neurotune
'''
import math
import numpy as np
def plot_generation_evolution(sim_var_names,
target_values = {},
individuals_file_name = '../data/ga_individuals.csv',
show_plot_already = True,
save_to_file=False,
save_to_file_hist=False,
title_prefix = ""):
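    '''
    Plot the evolution of parameter values and fitness from a neurotune GA
    run, plus per-generation histograms of each parameter.
    Each line of ``individuals_file_name`` is expected to look like
    (inferred from the parsing below)::
        generation, individual, fitness, [val1, val2, ...]
    '''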
import matplotlib.pyplot as pylab
individuals_file = open(individuals_file_name)
generations = []
generations_all = []
generations_offset = []
f = []
val_num = len(sim_var_names)
    nrows = math.ceil(math.sqrt(val_num))
    ncols = math.ceil(val_num / nrows)
    # Hand-tuned subplot layouts for small variable counts
    if val_num <= 3 or val_num == 5:
        nrows = val_num; ncols = 1
    if val_num == 4:
        nrows = 2; ncols = 2
    if val_num == 6:
        nrows = 3; ncols = 2
    if val_num == 7 or val_num == 8:
        nrows = 4; ncols = 2
    if val_num == 9:
        nrows = 3; ncols = 3
    if val_num == 10:
        nrows = 5; ncols = 2
population_total = 0
generations_total = 0
for line in individuals_file:
generation = int(line.split(',')[0])
if generation==0:
population_total+=1
generations_total = generation
print("Generating plots for %s variables over %s generations with population %s"%(val_num,generations_total,population_total))
print("Vals shown in %i rows x %i columns"%(nrows,ncols))
vals = {}
colours = {}
sizes = {}
ind_vals = {}
for i in range(val_num):
vals[i]=[]
colours[i]=[]
sizes[i]=[]
ind_vals[i]={}
individuals_file = open(individuals_file_name)
for line in individuals_file:
main_info = line.split('[')[0]
values = line.split('[')[1]
generation = int(main_info.split(',')[0])
individual = int(main_info.split(',')[1].strip())
fitness = float(main_info.split(',')[2].strip())
if individual == 0:
print("Generation %s..."%generation)
generations.append(generation)
generations_all.append(generation)
generations_offset.append(generation+(individual/40.0))
f.append(fitness)
val_strings = values[:-2].split(',')
for v in range(len(val_strings)):
value = float(val_strings[v].strip())
if individual == 0:
ind_vals[v][generation] = []
ind_vals[v][generation].append(value)
vals[v].append(value)
colours[v].append(individual)
sizes[v].append((population_total-individual)*2)
fig = pylab.figure()
fig.canvas.set_window_title(title_prefix+" Evolution over %i generations of %s"%(generations_total, sim_var_names))
for i in range(val_num):
pylab.subplot(nrows, ncols, i+1)
pylab.title(sim_var_names[i])
        if target_values is not None and sim_var_names[i] in target_values:
value = target_values[sim_var_names[i]]
x = [-1,generations_total+1]
y = [value,value]
pylab.plot(x,y,'--', color='grey')
pylab.scatter(generations_offset, vals[i], s=sizes[i], c=colours[i], alpha=0.4)
if i==0:
pylab.xlabel("Generation (%i individuals, offset slightly; larger circle => fitter)"%(population_total))
fig = pylab.figure()
fig.canvas.set_window_title(title_prefix+" Fitness over %i generations from %s"%(generations_total, individuals_file_name))
ax = fig.add_subplot(2,1,1)
ax.scatter(generations_offset, f, s=sizes[i], c=colours[i], alpha=0.4)
ax = fig.add_subplot(2,1,2)
ax.set_yscale('log')
ax.scatter(generations_offset, f, s=sizes[i], c=colours[i], alpha=0.4)
pylab.xlabel("Generation (%i individuals, offset slightly; larger circle => fitter)"%(population_total))
if save_to_file:
pylab.savefig(save_to_file, bbox_inches='tight')
fig = pylab.figure()
fig.canvas.set_window_title(title_prefix+" Histograms over %i generations of %s"%(generations_total, sim_var_names))
for i in range(val_num):
ax = pylab.subplot(nrows, ncols, i+1)
pylab.title(sim_var_names[i])
for generation in generations:
values = ind_vals[i][generation]
hist, bin_edges = np.histogram(values, bins=10)
half_bin_width = (bin_edges[1]-bin_edges[0])/2
xs = [be+half_bin_width for be in bin_edges[:-1]]
            shade = 1 - generation / (float(generations[-1]) + 1)
#print("Gen: %s; shade: %s; value bins: %s; tots: %s"%(generation,shade,xs,hist))
ax.plot(xs, hist, color=(shade,shade,shade))
if save_to_file_hist:
pylab.savefig(save_to_file_hist, bbox_inches='tight')
if show_plot_already:
pylab.show()
if __name__ == '__main__':
# example 3
target_values = {'amp': 65,
'period': 250,
'offset': -10}
# example 2
target_values = {'axon_gbar_na': 3661.79,
'axon_gbar_kv': 23.23,
'axon_gbar_kv3': 0.26,
'soma_gbar_na': 79.91,
'soma_gbar_kv': 0.58,
'soma_gbar_kv3': 1.57}
parameters = ['leak_cond_density',
'k_slow_cond_density',
'k_fast_cond_density',
'ca_boyle_cond_density',
'specific_capacitance',
'leak_erev',
'k_slow_erev',
'k_fast_erev',
'ca_boyle_erev']
#plot_generation_evolution(target_values.keys(), target_values)
plot_generation_evolution(parameters)
|
{
"content_hash": "725bac59014b9e15d43faedd359b7ea4",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 130,
"avg_line_length": 33.112994350282484,
"alnum_prop": 0.5405220952055964,
"repo_name": "vellamike/neurotune",
"id": "b486966ad0994940b2f583ca5a3cc061cb703f89",
"size": "5861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neurotune/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "57886"
}
],
"symlink_target": ""
}
|
'''
Created on 2017. 02. 12
Updated on 2017. 02. 12
@author:
'''
from __future__ import print_function
import os
from commons import DataLoader
from commons import Subjects
from commons import VersionUtil
from utils import Progress
class CodeRelevancy(DataLoader):
__name__ = u'CodeRelevancy'
isSplitCamel = False
def __init__(self):
pass
def run(self, _bugType, _camel=False):
S = Subjects()
data = {}
for group in S.groups:
if group not in data: data[group] = {}
for project in S.projects[group]:
msg = u'[%s/%s] %s, %s ' % (group, project, _bugType, 'camel' if _camel is True else '')
bugfile = os.path.join(S.getPath_featurebase(group, project), u'bugs', u'_terms', u'%s%s.tf' % (_bugType, '_camel' if _camel is True else ''))
versionName = VersionUtil.get_latest_version(S.bugs[project].keys())
srcfile = os.path.join(S.getPath_featurebase(group, project), u'sources', u'_terms', u'%s%s' % (versionName, '_camel' if _camel is True else ''))
data[group][project] = self.calc_project_relevancy(msg, bugfile, srcfile)
filename = os.path.join(S.getPath_featureroot(), 'PW-CodeRelevancy_%s%s.txt' % (_bugType, '_camel' if _camel is True else ''))
self.store_result(filename, data)
pass
def store_result(self, _filename, _data):
# save
f = open(_filename, 'w')
f.write('Group\tProject\tCode Relevancy (mul)\tCode Relevancy (min)\tCode Relevancy (max)\tCode Relevancy (mean)\tCode Relevancy (files)\tCode Relevancy (files_invers)\n')
        for group, projects in _data.items():
            for project, items in projects.items():
f.write('%s\t%s\t%f\t%f\t%f\t%f\t%f\t%f\n' % (
group, project, items['avgProduct'], items['avgMin'], items['avgMax'],
items['avgMean'], items['avgFiles'], items['avgFilesInverse']))
f.close()
pass
def calc_project_relevancy(self, _msg, _bugfile, _sourcefile):
progress = Progress(_msg, 2, 10, True)
progress.start()
print(u'loading..', end=u'')
bugItemTerms = self.load_itemwords(_bugfile)
SU = set(self.load_words_in_frequency(_sourcefile+'.idf'))
SrcFileTF = self.load_item_wordfrequency(_sourcefile + '.tf')
print(u'working', end=u'')
progress.set_point(0).set_upperbound(len(bugItemTerms))
stats = {}
        for bugID, terms in bugItemTerms.items():
stats[bugID] = self.calc_relevancy(terms, SU, SrcFileTF)
progress.check()
avgs = {}
avgs['avgProduct'] = sum([stats[bugID]['Product'] for bugID in stats]) / len(stats)
avgs['avgMin'] = sum([stats[bugID]['Min'] for bugID in stats]) / len(stats)
avgs['avgMax'] = sum([stats[bugID]['Max'] for bugID in stats]) / len(stats)
avgs['avgMean'] = sum([stats[bugID]['Mean'] for bugID in stats]) / len(stats)
avgs['avgFiles'] = sum([stats[bugID]['Files'] for bugID in stats]) / len(stats)
avgs['avgFilesInverse'] = sum([stats[bugID]['InverseFiles'] for bugID in stats]) / len(stats)
progress.done()
return avgs
def calc_relevancy(self, _bugTerms, _SU, _SrcFileTF):
stats = {'Product': 0, 'Min': 0, 'Max': 0, 'Mean': 0, 'Files': 0, 'InverseFiles': 0}
Wsu = list(set(_bugTerms) & _SU) # intersection between terms in specific bug report and terms in source code
if len(Wsu) == 0:
return stats
        stats['Product'] = 1
        stats['Min'] = float('inf')
        for w in Wsu:
            file_count = self.get_relevent_filecount(w, _SrcFileTF)
            # Guard against file_count == 0 to avoid ZeroDivisionError/NaN.
            inverse = 1 / float(file_count) if file_count != 0 else 1
            stats['Product'] *= inverse
            stats['Min'] = min(inverse, stats['Min'])
            stats['Max'] = max(inverse, stats['Max'])
            stats['Mean'] += inverse
        stats['Mean'] = stats['Mean'] / float(len(Wsu))
stats['Files'] = self.get_files_include_word(Wsu, _SrcFileTF)
stats['InverseFiles'] = (1 / float(stats['Files'])) if stats['Files'] != 0 else 0
return stats
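    # Worked example (illustrative): if a bug report shares the terms
    # {'timer', 'loop'} with the source corpus, and 'timer' occurs in 2 files
    # while 'loop' occurs in 4, then Product = (1/2)*(1/4) = 0.125,
    # Min = 1/4, Max = 1/2, and Mean = (1/2 + 1/4)/2 = 0.375.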
def get_relevent_filecount(self, _word, _srcFileTF):
idf_src = 0
        for fileID, srcTerms in _srcFileTF.items():
idf_src += (1 if _word in srcTerms else 0)
return idf_src
def get_files_include_word(self, _words, _srcFileTF):
fileSet = set([])
        for fileID, srcTerms in _srcFileTF.items():
for word in _words:
if word in srcTerms:
fileSet.add(fileID)
break
return len(fileSet)
###############################################################################################################
###############################################################################################################
if __name__ == "__main__":
obj = CodeRelevancy()
obj.run(_bugType='desc', _camel=False)
obj.run(_bugType='remain', _camel=False)
obj.run(_bugType='desc', _camel=True)
obj.run(_bugType='remain', _camel=True)
pass
|
{
"content_hash": "ac19d66d56cfd6431ae0485d55dbe661",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 173,
"avg_line_length": 37.544,
"alnum_prop": 0.6294481142126571,
"repo_name": "irblsensitivity/irblsensitivity",
"id": "3444cc95fa94dd3106e14657ee717c6cc3bcd5b1",
"size": "4716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/combine_features/PW_CodeRelevancy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "1768144"
},
{
"name": "Python",
"bytes": "374811"
},
{
"name": "Shell",
"bytes": "2451"
}
],
"symlink_target": ""
}
|
"""
This code was generated by Codezu.
Changes to this file may cause incorrect behavior and will be lost if
the code is regenerated.
"""
from mozurestsdk.mozuclient import default as default_client
from mozurestsdk.mozuurl import MozuUrl;
from mozurestsdk.urllocation import UrlLocation
class TenantAdminUserAuthTicket(object):
def __init__(self, mozuClient = None):
self.client = mozuClient or default_client();
def createUserAuthTicket(self,userAuthInfo, tenantId = None, responseFields = None):
""" Creates an authentication ticket for the supplied user to specify in API requests associated with the supplied tenant.
Args:
| userAuthInfo(userAuthInfo) - Information required to authenticate a user.
| tenantId (int) - Unique identifier of the development or production tenant for which to generate the user authentication ticket.
| responseFields (string) - Use this field to include those fields which are not included by default.
Returns:
| TenantAdminUserAuthTicket
Raises:
| ApiException
"""
url = MozuUrl("/api/platform/adminuser/authtickets/tenants?tenantId={tenantId}&responseFields={responseFields}", "POST", UrlLocation.HomePod, False);
url.formatUrl("responseFields", responseFields);
url.formatUrl("tenantId", tenantId);
self.client.withResourceUrl(url).withBody(userAuthInfo).execute();
return self.client.result();
def refreshAuthTicket(self,existingAuthTicket, tenantId = None, responseFields = None):
""" Generates a new user authentication ticket for the specified tenant by supplying the user's existing refresh token information.
Args:
| existingAuthTicket(existingAuthTicket) - Properties of the authentication ticket to be used in user claims with the API.
| tenantId (int) -
| responseFields (string) - Use this field to include those fields which are not included by default.
Returns:
| TenantAdminUserAuthTicket
Raises:
| ApiException
"""
url = MozuUrl("/api/platform/adminuser/authtickets/tenants?tenantId={tenantId}&responseFields={responseFields}", "PUT", UrlLocation.HomePod, False);
url.formatUrl("responseFields", responseFields);
url.formatUrl("tenantId", tenantId);
self.client.withResourceUrl(url).withBody(existingAuthTicket).execute();
return self.client.result();
def deleteUserAuthTicket(self,refreshToken):
""" Deletes the authentication ticket for the user by supplying the refresh token.
Args:
| refreshToken (string) - Alphanumeric string used for access tokens. This token refreshes access for accounts by generating a new developer or application account authentication ticket after an access token expires.
Raises:
| ApiException
"""
url = MozuUrl("/api/platform/adminuser/authtickets/?refreshToken={refreshToken}", "DELETE", UrlLocation.HomePod, False);
url.formatUrl("refreshToken", refreshToken);
self.client.withResourceUrl(url).execute();
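# Illustrative usage sketch (the tenant id and variable names below are
# placeholders, not from the SDK docs):
#   resource = TenantAdminUserAuthTicket()
#   ticket = resource.createUserAuthTicket(user_auth_info, tenantId=12345)
#   resource.deleteUserAuthTicket(refresh_token)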
|
{
"content_hash": "e210e110109eb02a1ed65db093d9dda5",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 219,
"avg_line_length": 37.0609756097561,
"alnum_prop": 0.7351102336294834,
"repo_name": "Mozu/mozu-python-sdk",
"id": "56a9622a352903ee2fad6cb2f003471c36bb0342",
"size": "3040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mozurestsdk/platform/adminuser/tenantadminuserauthticket.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "793537"
}
],
"symlink_target": ""
}
|
"""Base class for optimizers."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import checkpointable
from tensorflow.python.training import slot_creator
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
def _deduplicate_indexed_slices(values, indices):
"""Sums `values` associated with any non-unique `indices`.
Args:
values: A `Tensor` with rank >= 1.
indices: A one-dimensional integer `Tensor`, indexing into the first
dimension of `values` (as in an IndexedSlices object).
Returns:
A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
de-duplicated version of `indices` and `summed_values` contains the sum of
`values` slices associated with each unique index.
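  For example (mirroring the `IndexedSlices` docs later in this file),
  values ``[1, 1]`` with indices ``[0, 0]`` become summed values ``[2]``
  with unique indices ``[0]``.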
"""
unique_indices, new_index_positions = array_ops.unique(indices)
summed_values = math_ops.unsorted_segment_sum(
values, new_index_positions,
array_ops.shape(unique_indices)[0])
return (summed_values, unique_indices)
def _var_key(var):
if context.executing_eagerly():
return var._unique_id # pylint: disable=protected-access
return (var.op.graph, var.op.name)
class _OptimizableVariable(object):
"""Interface for abstracting over variables in the optimizers."""
@abc.abstractmethod
def target(self):
"""Returns the optimization target for this variable."""
raise NotImplementedError("Calling an abstract method.")
@abc.abstractmethod
def update_op(self, optimizer, g):
"""Returns the update ops for updating the variable."""
raise NotImplementedError("Calling an abstract method.")
class _RefVariableProcessor(_OptimizableVariable):
"""Processor for Variable."""
def __init__(self, v):
self._v = v
def __str__(self):
return "<_RefVariableProcessor(%s)>" % self._v
def target(self):
return self._v._ref() # pylint: disable=protected-access
def update_op(self, optimizer, g):
if isinstance(g, ops.Tensor):
update_op = optimizer._apply_dense(g, self._v) # pylint: disable=protected-access
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
else:
assert isinstance(g, ops.IndexedSlices), ("Gradient ", g, " is neither a "
"tensor nor IndexedSlices.")
if self._v.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
# pylint: disable=protected-access
return optimizer._apply_sparse_duplicate_indices(g, self._v)
class _DenseReadResourceVariableProcessor(_OptimizableVariable):
"""Processor for dense ResourceVariables."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g):
# pylint: disable=protected-access
update_op = optimizer._resource_apply_dense(g, self._v.op.inputs[0])
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
class _DenseResourceVariableProcessor(_OptimizableVariable):
"""Processor for dense ResourceVariables."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g):
# pylint: disable=protected-access
if isinstance(g, ops.IndexedSlices):
if self._v.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
return optimizer._resource_apply_sparse_duplicate_indices(
g.values, self._v, g.indices)
update_op = optimizer._resource_apply_dense(g, self._v)
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
class _StreamingModelPortProcessor(_OptimizableVariable):
"""Processor for streaming ModelPorts."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g):
return g
class _TensorProcessor(_OptimizableVariable):
"""Processor for ordinary Tensors.
Even though a Tensor can't really be updated, sometimes it is useful to
compute the gradients with respect to a Tensor using the optimizer. Updating
the Tensor is, of course, unsupported.
"""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g):
raise NotImplementedError("Trying to update a Tensor ", self._v)
def _get_processor(v):
"""The processor of v."""
if context.executing_eagerly():
if isinstance(v, ops.Tensor):
return _TensorProcessor(v)
else:
return _DenseResourceVariableProcessor(v)
if isinstance(
v, resource_variable_ops.ResourceVariable) and not v._in_graph_mode: # pylint: disable=protected-access
# True if and only if `v` was initialized eagerly.
return _DenseResourceVariableProcessor(v)
if v.op.type == "VarHandleOp":
return _DenseResourceVariableProcessor(v)
if isinstance(v, variables.Variable):
return _RefVariableProcessor(v)
if v.op.type == "SubmodelPort":
return _StreamingModelPortProcessor(v)
if isinstance(v, ops.Tensor):
return _TensorProcessor(v)
raise NotImplementedError("Trying to optimize unsupported type ", v)
@tf_export("train.Optimizer")
class Optimizer(
# Optimizers inherit from CheckpointableBase rather than Checkpointable
# since they do most of their dependency management themselves (slot
# variables are special-cased, and non-slot variables are keyed to graphs).
checkpointable.CheckpointableBase):
"""Base class for optimizers.
This class defines the API to add Ops to train a model. You never use this
class directly, but instead instantiate one of its subclasses such as
`GradientDescentOptimizer`, `AdagradOptimizer`, or `MomentumOptimizer`.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Add Ops to the graph to minimize a cost by updating a list of variables.
# "cost" is a Tensor, and the list of variables contains tf.Variable
# objects.
opt_op = opt.minimize(cost, var_list=<list of variables>)
```
In the training program you will just have to run the returned Op.
```python
# Execute opt_op to do one step of training:
opt_op.run()
```
### Processing gradients before applying them.
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `compute_gradients()`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Compute the gradients for a list of variables.
grads_and_vars = opt.compute_gradients(loss, <list of variables>)
# grads_and_vars is a list of tuples (gradient, variable). Do whatever you
# need to the 'gradient' part, for example cap them, etc.
capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]
# Ask the optimizer to apply the capped gradients.
opt.apply_gradients(capped_grads_and_vars)
```
### Gating Gradients
Both `minimize()` and `compute_gradients()` accept a `gate_gradients`
argument that controls the degree of parallelism during the application of
the gradients.
The possible values are: `GATE_NONE`, `GATE_OP`, and `GATE_GRAPH`.
<b>`GATE_NONE`</b>: Compute and apply gradients in parallel. This provides
the maximum parallelism in execution, at the cost of some non-reproducibility
in the results. For example the two gradients of `matmul` depend on the input
values: With `GATE_NONE` one of the gradients could be applied to one of the
inputs _before_ the other gradient is computed resulting in non-reproducible
results.
<b>`GATE_OP`</b>: For each Op, make sure all gradients are computed before
they are used. This prevents race conditions for Ops that generate gradients
for multiple inputs where the gradients depend on the inputs.
<b>`GATE_GRAPH`</b>: Make sure all gradients for all variables are computed
before any one of them is used. This provides the least parallelism but can
be useful if you want to process all gradients before applying any of them.
### Slots
  Some optimizer subclasses, such as `MomentumOptimizer` and `AdagradOptimizer`,
  allocate and manage additional variables associated with the variables to
  train. These are called <i>Slots</i>. Slots have names and you can ask the
  optimizer for the names of the slots that it uses. Once you have a slot name
  you can ask the optimizer for the variable it created to hold the slot value.
  This can be useful if you want to log or debug a training algorithm, report
  stats about the slots, etc.
"""
# Values for gate_gradients.
GATE_NONE = 0
GATE_OP = 1
GATE_GRAPH = 2
def __init__(self, use_locking, name):
"""Create a new Optimizer.
This must be called by the constructors of subclasses.
Args:
use_locking: Bool. If True apply use locks to prevent concurrent updates
to variables.
name: A non-empty string. The name to use for accumulators created
for the optimizer.
Raises:
ValueError: If name is malformed.
"""
if not name:
raise ValueError("Must specify the optimizer name")
self._use_locking = use_locking
self._name = name
# Dictionary of slots.
# {slot_name :
# {_var_key(variable_to_train): slot_for_the_variable, ... },
# ... }
self._slots = {}
self._non_slot_dict = {}
# For implementing Checkpointable. Stores information about how to restore
# slot variables which have not yet been created
# (checkpointable._CheckpointPosition objects).
# {slot_name :
# {_var_key(variable_to_train): [checkpoint_position, ... ], ... },
# ... }
self._deferred_slot_restorations = {}
def get_name(self):
return self._name
def minimize(self, loss, global_step=None, var_list=None,
gate_gradients=GATE_OP, aggregation_method=None,
colocate_gradients_with_ops=False, name=None,
grad_loss=None):
"""Add operations to minimize `loss` by updating `var_list`.
    This method simply combines calls to `compute_gradients()` and
    `apply_gradients()`. If you want to process the gradients before applying
    them, call `compute_gradients()` and `apply_gradients()` explicitly instead
    of using this function.
Args:
loss: A `Tensor` containing the value to minimize.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
var_list: Optional list or tuple of `Variable` objects to update to
minimize `loss`. Defaults to the list of variables collected in
the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
An Operation that updates the variables in `var_list`. If `global_step`
was not `None`, that operation also increments `global_step`.
Raises:
ValueError: If some of the variables are not `Variable` objects.
@compatibility(eager)
When eager execution is enabled, `loss` should be a Python function that
takes elements of `var_list` as arguments and computes the value to be
minimized. If `var_list` is None, `loss` should take no arguments.
Minimization (and gradient computation) is done with respect to the
elements of `var_list` if not None, else with respect to any trainable
variables created during the execution of the `loss` function.
`gate_gradients`, `aggregation_method`, `colocate_gradients_with_ops` and
`grad_loss` are ignored when eager execution is enabled.
@end_compatibility
"""
grads_and_vars = self.compute_gradients(
loss, var_list=var_list, gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
vars_with_grad = [v for g, v in grads_and_vars if g is not None]
if not vars_with_grad:
raise ValueError(
"No gradients provided for any variable, check your graph for ops"
" that do not support gradients, between variables %s and loss %s." %
([str(v) for _, v in grads_and_vars], loss))
return self.apply_gradients(grads_and_vars, global_step=global_step,
name=name)
def compute_gradients(self, loss, var_list=None,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
"""Compute gradients of `loss` for the variables in `var_list`.
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A Tensor containing the value to minimize or a callable taking
no arguments which returns the value to minimize. When eager execution
is enabled it must be a callable.
var_list: Optional list or tuple of `tf.Variable` to update to minimize
`loss`. Defaults to the list of variables collected in the graph
under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
      TypeError: If `var_list` contains anything other than `Variable` objects.
ValueError: If some arguments are invalid.
RuntimeError: If called with eager execution enabled and `loss` is
not callable.
@compatibility(eager)
When eager execution is enabled, `gate_gradients`, `aggregation_method`,
and `colocate_gradients_with_ops` are ignored.
@end_compatibility
"""
if callable(loss):
with backprop.GradientTape() as tape:
if var_list is not None:
tape.watch(var_list)
loss_value = loss()
if var_list is None:
var_list = tape.watched_variables()
grads = tape.gradient(loss_value, var_list, grad_loss)
return list(zip(grads, var_list))
if context.executing_eagerly():
raise RuntimeError(
"`loss` passed to Optimizer.compute_gradients should "
"be a function when eager execution is enabled.")
if gate_gradients not in [Optimizer.GATE_NONE, Optimizer.GATE_OP,
Optimizer.GATE_GRAPH]:
raise ValueError("gate_gradients must be one of: Optimizer.GATE_NONE, "
"Optimizer.GATE_OP, Optimizer.GATE_GRAPH. Not %s" %
gate_gradients)
self._assert_valid_dtypes([loss])
if grad_loss is not None:
self._assert_valid_dtypes([grad_loss])
if var_list is None:
var_list = (
variables.trainable_variables() +
ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
else:
var_list = nest.flatten(var_list)
# pylint: disable=protected-access
var_list += ops.get_collection(ops.GraphKeys._STREAMING_MODEL_PORTS)
# pylint: enable=protected-access
processors = [_get_processor(v) for v in var_list]
if not var_list:
raise ValueError("No variables to optimize.")
var_refs = [p.target() for p in processors]
grads = gradients.gradients(
loss, var_refs, grad_ys=grad_loss,
gate_gradients=(gate_gradients == Optimizer.GATE_OP),
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops)
if gate_gradients == Optimizer.GATE_GRAPH:
grads = control_flow_ops.tuple(grads)
grads_and_vars = list(zip(grads, var_list))
self._assert_valid_dtypes(
[v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource])
return grads_and_vars
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients. If `global_step`
was not None, that operation also increments `global_step`.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
"""
# This is a default implementation of apply_gradients() that can be shared
# by most optimizers. It relies on the subclass implementing the following
# methods: _create_slots(), _prepare(), _apply_dense(), and _apply_sparse().
grads_and_vars = tuple(grads_and_vars) # Make sure repeat iteration works.
if not grads_and_vars:
raise ValueError("No variables provided.")
converted_grads_and_vars = []
for g, v in grads_and_vars:
if g is not None:
try:
# Convert the grad to Tensor or IndexedSlices if necessary.
g = ops.convert_to_tensor_or_indexed_slices(g)
except TypeError:
raise TypeError(
"Gradient must be convertible to a Tensor"
" or IndexedSlices, or None: %s" % g)
if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):
raise TypeError(
"Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
p = _get_processor(v)
converted_grads_and_vars.append((g, v, p))
converted_grads_and_vars = tuple(converted_grads_and_vars)
var_list = [v for g, v, _ in converted_grads_and_vars if g is not None]
if not var_list:
raise ValueError("No gradients provided for any variable: %s." %
([str(v) for _, _, v in converted_grads_and_vars],))
with ops.init_scope():
self._create_slots(var_list)
update_ops = []
with ops.name_scope(name, self._name) as name:
self._prepare()
for grad, var, processor in converted_grads_and_vars:
if grad is None:
continue
# We colocate all ops created in _apply_dense or _apply_sparse
# on the same device as the variable.
# TODO(apassos): figure out how to get the variable name here.
if context.executing_eagerly() or isinstance(
var,
resource_variable_ops.ResourceVariable) and not var._in_graph_mode: # pylint: disable=protected-access
scope_name = ""
else:
scope_name = var.op.name
with ops.name_scope("update_" + scope_name), ops.colocate_with(var):
update_ops.append(processor.update_op(self, grad))
if global_step is None:
apply_updates = self._finish(update_ops, name)
else:
with ops.control_dependencies([self._finish(update_ops, "update")]):
with ops.colocate_with(global_step):
if isinstance(global_step, resource_variable_ops.ResourceVariable):
# TODO(apassos): the implicit read in assign_add is slow; consider
# making it less so.
apply_updates = resource_variable_ops.assign_add_variable_op(
global_step.handle,
ops.convert_to_tensor(1, dtype=global_step.dtype),
name=name)
else:
apply_updates = state_ops.assign_add(global_step, 1, name=name)
if not context.executing_eagerly():
if isinstance(apply_updates, ops.Tensor):
apply_updates = apply_updates.op
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
if apply_updates not in train_op:
train_op.append(apply_updates)
return apply_updates
def get_slot(self, var, name):
"""Return a slot named `name` created for `var` by the Optimizer.
Some `Optimizer` subclasses use additional variables. For example
`Momentum` and `Adagrad` use variables to accumulate updates. This method
gives access to these `Variable` objects if for some reason you need them.
Use `get_slot_names()` to get the list of slot names created by the
`Optimizer`.
Args:
var: A variable passed to `minimize()` or `apply_gradients()`.
name: A string.
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
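    Example (an illustrative sketch; assumes `opt` is a `MomentumOptimizer`
    and `var` was passed to its `minimize()`):
    ```python
    momentum_slot = opt.get_slot(var, "momentum")
    ```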
"""
named_slots = self._slots.get(name, None)
if not named_slots:
return None
return named_slots.get(_var_key(var), None)
def get_slot_names(self):
"""Return a list of the names of slots created by the `Optimizer`.
See `get_slot()`.
Returns:
A list of strings.
"""
return sorted(self._slots.keys())
def variables(self):
"""A list of variables which encode the current state of `Optimizer`.
Includes slot variables and additional global variables created by the
optimizer in the current default graph.
Returns:
A list of variables.
"""
executing_eagerly = context.executing_eagerly()
current_graph = ops.get_default_graph()
def _from_current_graph(variable):
if executing_eagerly:
# No variable.op in eager mode. We don't expect lots of eager graphs,
# but behavior should be consistent with graph mode.
return variable._graph_key == current_graph._graph_key # pylint: disable=protected-access
else:
return variable.op.graph is current_graph
optimizer_variables = [v for v in self._non_slot_variables()
if _from_current_graph(v)]
for _, variable_dict in self._slots.items():
for _, slot_for_variable in variable_dict.items():
if _from_current_graph(slot_for_variable):
optimizer_variables.append(slot_for_variable)
# Sort variables by name so that the return is deterministic.
return sorted(optimizer_variables, key=lambda v: v.name)
def _create_non_slot_variable(self, initial_value, name, colocate_with):
"""Add an extra variable, not associated with a slot."""
eager = context.executing_eagerly()
graph = None if eager else colocate_with.graph
key = (name, graph)
v = self._non_slot_dict.get(key, None)
if v is None:
self._maybe_initialize_checkpointable()
with ops.colocate_with(colocate_with):
if eager:
restored_initial_value = self._preload_simple_restoration(
name=name, shape=None)
if restored_initial_value is not None:
initial_value = restored_initial_value
v = variable_scope.variable(initial_value, name=name, trainable=False)
# Restore this variable by name if necessary, but don't add a
# Checkpointable dependency. Optimizers return the current graph's
# non-slot variables from _checkpoint_dependencies explicitly rather
# than unconditionally adding dependencies (since there may be multiple
# non-slot variables with the same name in different graphs, trying to
# save all of them would result in errors).
self._handle_deferred_dependencies(name=name, checkpointable=v)
self._non_slot_dict[key] = v
return v
@property
def _checkpoint_dependencies(self):
"""From Checkpointable. Gather graph-specific non-slot variables to save."""
current_graph_non_slot_variables = []
current_graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access
for (name, _), variable_object in sorted(self._non_slot_dict.items(),
# Avoid comparing graphs
key=lambda item: item[0][0]):
if variable_object._graph_key == current_graph_key: # pylint: disable=protected-access
current_graph_non_slot_variables.append(
checkpointable.CheckpointableReference(
name=name, ref=variable_object))
return (super(Optimizer, self)._checkpoint_dependencies
+ current_graph_non_slot_variables)
def _lookup_dependency(self, name):
"""From Checkpointable. Find a non-slot variable in the current graph."""
unconditional = super(Optimizer, self)._lookup_dependency(name)
if unconditional is not None:
return unconditional
graph = None if context.executing_eagerly() else ops.get_default_graph()
return self._get_non_slot_variable(name, graph=graph)
def _get_non_slot_variable(self, name, graph=None):
return self._non_slot_dict.get((name, graph), None)
def _non_slot_variables(self):
"""Additional variables created by the `Optimizer`.
Returns:
A list or tuple of variables.
"""
return self._non_slot_dict.values()
def _assert_valid_dtypes(self, tensors):
"""Asserts tensors are all valid types (see `_valid_dtypes`).
Args:
tensors: Tensors to check.
Raises:
ValueError: If any tensor is not a valid type.
"""
valid_dtypes = self._valid_dtypes()
for t in tensors:
dtype = t.dtype.base_dtype
if dtype not in valid_dtypes:
raise ValueError(
"Invalid type %r for %s, expected: %s." % (
dtype, t.name, [v for v in valid_dtypes]))
# --------------
# Methods to be implemented by subclasses if they want to use the
# inherited implementation of apply_gradients() or compute_gradients().
# --------------
def _valid_dtypes(self):
"""Valid types for loss, variables and gradients.
Subclasses should override to allow other float types.
Returns:
Valid types for loss, variables and gradients.
"""
return set(
[dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])
def _create_slots(self, var_list):
"""Create all slots needed by the variables.
Args:
var_list: A list of `Variable` objects.
"""
# No slots needed by default
pass
def _prepare(self):
"""Create all needed tensors before applying gradients.
This is called with the name_scope using the "name" that
users have chosen for the application of gradients.
"""
pass
def _apply_dense(self, grad, var):
"""Add ops to apply dense gradients to `var`.
Args:
grad: A `Tensor`.
var: A `Variable` object.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _resource_apply_dense(self, grad, handle):
"""Add ops to apply dense gradients to the variable `handle`.
Args:
grad: a `Tensor` representing the gradient.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
"""Add ops to apply sparse gradients to `handle`, with repeated indices.
Optimizers which override this method must deal with repeated indices. See
the docstring of `_apply_sparse_duplicate_indices` for details. By default
the correct behavior, to sum non-unique indices and their associated
gradients, is enforced by first pre-processing `grad` and `indices` and
passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
with duplicate indices may instead override this method to avoid the
overhead of summing.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
indices: a `Tensor` of integral type representing the indices for
which the gradient is nonzero. Indices may be repeated.
Returns:
An `Operation` which updates the value of the variable.
"""
summed_grad, unique_indices = _deduplicate_indexed_slices(
values=grad, indices=indices)
return self._resource_apply_sparse(summed_grad, handle, unique_indices)
def _resource_apply_sparse(self, grad, handle, indices):
"""Add ops to apply sparse gradients to the variable `handle`.
Similar to `_apply_sparse`, the `indices` argument to this method has been
de-duplicated. Optimizers which deal correctly with non-unique indices may
instead override `_resource_apply_sparse_duplicate_indices` to avoid this
overhead.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
indices: a `Tensor` of integral type representing the indices for
which the gradient is nonzero. Indices are unique.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _apply_sparse_duplicate_indices(self, grad, var):
"""Add ops to apply sparse gradients to `var`, with repeated sparse indices.
Optimizers which override this method must deal with IndexedSlices objects
such as the following:
IndexedSlicesValue(values=[1, 1], indices=[0, 0], dense_shape=[1])
The correct interpretation is:
IndexedSlicesValue(values=[2], indices=[0], dense_shape=[1])
Many optimizers deal incorrectly with repeated indices when updating based
on sparse gradients (e.g. summing squares rather than squaring the sum, or
applying momentum terms multiple times). Adding first is always the correct
behavior, so this is enforced here by reconstructing the IndexedSlices to
have only unique indices, then calling _apply_sparse.
Optimizers which deal correctly with repeated indices may instead override
this method to avoid the overhead of summing indices.
Args:
grad: `IndexedSlices`.
var: A `Variable` object.
Returns:
An `Operation`.
"""
summed_values, unique_indices = _deduplicate_indexed_slices(
values=grad.values, indices=grad.indices)
gradient_no_duplicate_indices = ops.IndexedSlices(
indices=unique_indices,
values=summed_values,
dense_shape=grad.dense_shape)
return self._apply_sparse(gradient_no_duplicate_indices, var)
def _apply_sparse(self, grad, var):
"""Add ops to apply sparse gradients to `var`.
The IndexedSlices object passed to `grad` in this function is by default
pre-processed in `_apply_sparse_duplicate_indices` to remove duplicate
indices (see its docstring for details). Optimizers which can tolerate or
have correct special cases for duplicate sparse indices may override
`_apply_sparse_duplicate_indices` instead of this function, avoiding that
overhead.
Args:
grad: `IndexedSlices`, with no repeated indices.
var: A `Variable` object.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _finish(self, update_ops, name_scope):
"""Do what is needed to finish the update.
This is called with the `name_scope` using the "name" that
users have chosen for the application of gradients.
Args:
update_ops: List of `Operation` objects to update variables. This list
contains the values returned by the `_apply_dense()` and
`_apply_sparse()` calls.
name_scope: String. Name to use for the returned operation.
Returns:
The operation to apply updates.
"""
return control_flow_ops.group(*update_ops, name=name_scope)
# --------------
# Utility methods for subclasses.
# --------------
def _slot_dict(self, slot_name):
"""Returns a dict for caching slots created under the given name.
Args:
slot_name: Name for the slot.
Returns:
A dict that maps primary `Variable` objects to the slot created
for that variable, under the given slot name.
"""
named_slots = self._slots.get(slot_name, None)
if named_slots is None:
named_slots = {}
self._slots[slot_name] = named_slots
return named_slots
def _get_or_make_slot(self, var, val, slot_name, op_name):
"""Find or create a slot for a variable.
Args:
var: A `Variable` object.
val: A `Tensor`. The initial value of the slot.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_slot(var, val, op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
def _get_or_make_slot_with_initializer(self, var, initializer, shape, dtype,
slot_name, op_name):
"""Find or create a slot for a variable, using an Initializer.
Args:
var: A `Variable` object.
initializer: An `Initializer`. The initial value of the slot.
shape: Shape of the initial value of the slot.
dtype: Type of the value of the slot.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_slot_with_initializer(
var, initializer, shape, dtype, op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
def _zeros_slot(self, var, slot_name, op_name):
"""Find or create a slot initialized with 0.0.
Args:
var: A `Variable` object.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_zeros_slot(var, op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
# --------------
# For implementing the Checkpointable interface.
# --------------
def _restore_slot_variable(self, slot_name, variable, slot_variable):
"""Restore a newly created slot variable's value."""
variable_key = _var_key(variable)
deferred_restorations = self._deferred_slot_restorations.get(
slot_name, {}).pop(variable_key, [])
# Iterate over restores, highest restore UID first to minimize the number
# of assignments.
deferred_restorations.sort(key=lambda position: position.restore_uid,
reverse=True)
for checkpoint_position in deferred_restorations:
checkpoint_position.restore(slot_variable)
def _create_or_restore_slot_variable(
self, slot_variable_position, slot_name, variable):
"""Restore a slot variable's value, possibly creating it.
Called when a variable which has an associated slot variable is created or
restored. When executing eagerly, we create the slot variable with a
restoring initializer.
No new variables are created when graph building. Instead,
_restore_slot_variable catches these after normal creation and adds restore
ops to the graph. This method is nonetheless important when graph building
for the case when a slot variable has already been created but `variable`
has just been added to a dependency graph (causing us to realize that the
slot variable needs to be restored).
Args:
slot_variable_position: A `checkpointable._CheckpointPosition` object
indicating the slot variable `Checkpointable` object to be restored.
slot_name: The name of this `Optimizer`'s slot to restore into.
variable: The variable object this slot is being created for.
"""
named_slots = self._slot_dict(slot_name)
variable_key = _var_key(variable)
slot_variable = named_slots.get(variable_key, None)
if (slot_variable is None and context.executing_eagerly() and
slot_variable_position.is_simple_variable()):
initializer = checkpointable.CheckpointInitialValue(
checkpoint_position=slot_variable_position)
slot_variable = self._get_or_make_slot(
var=variable,
val=initializer,
slot_name=slot_name,
op_name=self._name)
# Slot variables are not owned by any one object (because we don't want to
# save the slot variable if the optimizer is saved without the non-slot
# variable, or if the non-slot variable is saved without the optimizer;
# it's a dependency hypergraph with edges of the form (optimizer, non-slot
# variable, variable)). So we don't _track_ slot variables anywhere, and
# instead special-case this dependency and otherwise pretend it's a normal
# graph.
if slot_variable is not None:
# If we've either made this slot variable, or if we've pulled out an
# existing slot variable, we should restore it.
slot_variable_position.restore(slot_variable)
else:
# We didn't make the slot variable. Defer restoring until it gets created
# normally. We keep a list rather than the one with the highest restore
# UID in case slot variables have their own dependencies, in which case
# those could differ between restores.
self._deferred_slot_restorations.setdefault(
slot_name, {}).setdefault(variable_key, []).append(
slot_variable_position)
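# A minimal sketch (not part of the original file) of how a concrete
# optimizer typically uses the slot helpers above: `_create_slots` makes one
# zero-initialized slot per variable via `_zeros_slot`, and `_apply_dense`
# later retrieves it with `get_slot`. The class name and hyperparameters
# below are illustrative assumptions, not TensorFlow API.
#
#   class ToyAccumulatorOptimizer(Optimizer):
#     def __init__(self, learning_rate=0.01, use_locking=False,
#                  name="ToyAccumulator"):
#       super(ToyAccumulatorOptimizer, self).__init__(use_locking, name)
#       self._lr = learning_rate
#
#     def _create_slots(self, var_list):
#       # One zero-initialized "accum" slot per trainable variable.
#       for v in var_list:
#         self._zeros_slot(v, "accum", self._name)
#
#     def _apply_dense(self, grad, var):
#       accum = self.get_slot(var, "accum")
#       accum_t = accum.assign_add(grad)
#       return var.assign_sub(self._lr * accum_t).op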
|
{
"content_hash": "c6f424ae009ed3e9d257c352278957f9",
"timestamp": "",
"source": "github",
"line_count": 1039,
"max_line_length": 115,
"avg_line_length": 38.75553416746872,
"alnum_prop": 0.6738023691856856,
"repo_name": "Xeralux/tensorflow",
"id": "bf79714f9682e60b97788b8b470821cfe9290886",
"size": "40957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/optimizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "340972"
},
{
"name": "C++",
"bytes": "39479562"
},
{
"name": "CMake",
"bytes": "194702"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33675501"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425916"
}
],
"symlink_target": ""
}
|
"""
Run a genetic algorithm to find an appropriate architecture for some image
classification task with Keras+TF.
To use, define a `GenomeHandler` defined in genomehandler.py. Then pass it, with
training data, to a DEvol instance to run the genetic algorithm. See the readme
for more detailed instructions.
"""
from __future__ import print_function
import random as rand
import csv
import operator
import gc
import os
from datetime import datetime
from keras.callbacks import EarlyStopping
from keras.models import load_model
import keras.backend as K
from sklearn.metrics import log_loss
import numpy as np
if K.backend() == 'tensorflow':
import tensorflow as tf
__all__ = ['DEvol']
METRIC_OPS = [operator.__lt__, operator.__gt__]
METRIC_OBJECTIVES = [min, max]
class DEvol:
"""
Object which carries out genetic search and returns top performing model
upon completion.
"""
def __init__(self, genome_handler, data_path=""):
"""
Initialize a DEvol object which carries out the training and evaluation
of a genetic search.
Args:
genome_handler (GenomeHandler): the genome handler object defining
the restrictions for the architecture search space
data_path (str): the file which the genome encodings and metric data
will be stored in
"""
self.genome_handler = genome_handler
self.datafile = data_path or (datetime.now().ctime() + '.csv')
self._bssf = -1
if os.path.isfile(data_path) and os.stat(data_path).st_size > 1:
            raise ValueError(('Non-empty file %s already exists. Please '
                              'change the file path to prevent overwriting '
                              'genome data.' % data_path))
print("Genome encoding and metric data stored at", self.datafile, "\n")
with open(self.datafile, 'a') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
metric_cols = ["Val Loss", "Val Accuracy"]
genome = genome_handler.genome_representation() + metric_cols
writer.writerow(genome)
def set_objective(self, metric):
"""
Set the metric for optimization. Can also be done by passing to
`run`.
Args:
metric (str): either 'acc' to maximize classification accuracy, or
else 'loss' to minimize the loss function
"""
if metric == 'acc':
metric = 'accuracy'
if metric not in ['loss', 'accuracy']:
            raise ValueError(('Invalid metric name {} provided - should be '
                              '"accuracy" or "loss"').format(metric))
self._metric = metric
self._objective = "max" if self._metric == "accuracy" else "min"
self._metric_index = 1 if self._metric == 'loss' else -1
self._metric_op = METRIC_OPS[self._objective == 'max']
self._metric_objective = METRIC_OBJECTIVES[self._objective == 'max']
def run(self, dataset, num_generations, pop_size, epochs, fitness=None,
metric='accuracy'):
"""
Run genetic search on dataset given number of generations and
population size
Args:
dataset : tuple or list of numpy arrays in form ((train_data,
train_labels), (validation_data, validation_labels))
num_generations (int): number of generations to search
pop_size (int): initial population size
epochs (int): epochs for each model eval, passed to keras model.fit
            fitness (None, optional): scoring function applied to the
                population's scores; it is called on a numpy array holding
                a min/max-scaled version of the evaluated model metrics, so
                it should accept real numbers including 0. If left as None,
                the min/max-scaled values are used directly.
            metric (str, optional): must be "accuracy" or "loss"; defines
                what to optimize during the search
Returns:
keras model: best model found with weights
"""
self.set_objective(metric)
        # If no validation data is given, set it to None.
if len(dataset) == 2:
(self.x_train, self.y_train), (self.x_test, self.y_test) = dataset
self.x_val = None
self.y_val = None
else:
(self.x_train, self.y_train), (self.x_test, self.y_test), (self.x_val, self.y_val) = dataset
# generate and evaluate initial population
members = self._generate_random_population(pop_size)
pop = self._evaluate_population(members,
epochs,
fitness,
0,
num_generations)
# evolve
for gen in range(1, num_generations):
members = self._reproduce(pop, gen)
pop = self._evaluate_population(members,
epochs,
fitness,
gen,
num_generations)
return load_model('best-model.h5')
def _reproduce(self, pop, gen):
members = []
# 95% of population from crossover
for _ in range(int(len(pop) * 0.95)):
members.append(self._crossover(pop.select(), pop.select()))
# best models survive automatically
members += pop.get_best(len(pop) - int(len(pop) * 0.95))
# randomly mutate
for imem, mem in enumerate(members):
members[imem] = self._mutate(mem, gen)
return members
def _evaluate(self, genome, epochs):
model = self.genome_handler.decode(genome)
loss, accuracy = None, None
fit_params = {
'x': self.x_train,
'y': self.y_train,
'validation_split': 0.1,
'epochs': epochs,
'verbose': 1,
'callbacks': [
EarlyStopping(monitor='val_loss', patience=1, verbose=1)
]
}
if self.x_val is not None:
fit_params['validation_data'] = (self.x_val, self.y_val)
try:
model.fit(**fit_params)
loss, accuracy = model.evaluate(self.x_test, self.y_test, verbose=0)
except Exception as e:
loss, accuracy = self._handle_broken_model(model, e)
self._record_stats(model, genome, loss, accuracy)
return model, loss, accuracy
def _record_stats(self, model, genome, loss, accuracy):
with open(self.datafile, 'a') as csvfile:
writer = csv.writer(csvfile, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
row = list(genome) + [loss, accuracy]
writer.writerow(row)
met = loss if self._metric == 'loss' else accuracy
        # Use equality, not identity, when comparing with numeric literals.
        if (self._bssf == -1 or
                self._metric_op(met, self._bssf) and
                accuracy != 0):
try:
os.remove('best-model.h5')
except OSError:
pass
self._bssf = met
model.save('best-model.h5')
def _handle_broken_model(self, model, error):
del model
n = self.genome_handler.n_classes
loss = log_loss(np.concatenate(([1], np.zeros(n - 1))), np.ones(n) / n)
accuracy = 1 / n
gc.collect()
if K.backend() == 'tensorflow':
K.clear_session()
tf.reset_default_graph()
print('An error occurred and the model could not train:')
print(error)
        print(('Model assigned a poor score. Please ensure that your model '
               'constraints fit within your computational resources.'))
return loss, accuracy
def _evaluate_population(self, members, epochs, fitness, igen, ngen):
fit = []
for imem, mem in enumerate(members):
self._print_evaluation(imem, len(members), igen, ngen)
res = self._evaluate(mem, epochs)
v = res[self._metric_index]
del res
fit.append(v)
fit = np.array(fit)
self._print_result(fit, igen)
return _Population(members, fit, fitness, obj=self._objective)
def _print_evaluation(self, imod, nmod, igen, ngen):
fstr = '\nmodel {0}/{1} - generation {2}/{3}:\n'
print(fstr.format(imod + 1, nmod, igen + 1, ngen))
def _generate_random_population(self, size):
return [self.genome_handler.generate() for _ in range(size)]
def _print_result(self, fitness, generation):
result_str = ('Generation {3}:\t\tbest {4}: {0:0.4f}\t\taverage:'
'{1:0.4f}\t\tstd: {2:0.4f}')
print(result_str.format(self._metric_objective(fitness),
np.mean(fitness),
np.std(fitness),
generation + 1, self._metric))
def _crossover(self, genome1, genome2):
cross_ind = rand.randint(0, len(genome1))
child = genome1[:cross_ind] + genome2[cross_ind:]
return child
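    # Worked example (illustrative): with genome1 = [1, 2, 3, 4] and
    # genome2 = [9, 8, 7, 6], cross_ind = 2 yields child [1, 2, 7, 6];
    # cross_ind = len(genome1) simply copies genome1 unchanged.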
def _mutate(self, genome, generation):
# increase mutations as program continues
num_mutations = max(3, generation // 4)
return self.genome_handler.mutate(genome, num_mutations)
class _Population(object):
def __len__(self):
return len(self.members)
def __init__(self, members, fitnesses, score, obj='max'):
self.members = members
scores = fitnesses - fitnesses.min()
if scores.max() > 0:
scores /= scores.max()
if obj == 'min':
scores = 1 - scores
if score:
self.scores = score(scores)
else:
self.scores = scores
self.s_fit = sum(self.scores)
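    # Worked example (illustrative): fitnesses [0.2, 0.5, 0.8] min/max-scale
    # to [0.0, 0.5, 1.0]; with obj='min' they invert to [1.0, 0.5, 0.0], so
    # lower-loss members get larger roulette-wheel shares in select().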
def get_best(self, n):
combined = [(self.members[i], self.scores[i])
for i in range(len(self.members))]
        combined.sort(key=(lambda x: x[1]), reverse=True)
return [x[0] for x in combined[:n]]
def select(self):
dart = rand.uniform(0, self.s_fit)
sum_fits = 0
        for i in range(len(self.members)):
            sum_fits += self.scores[i]
            if sum_fits >= dart:
                return self.members[i]
        # Guard against floating-point round-off leaving the dart beyond the
        # accumulated total: fall back to the last member.
        return self.members[-1]
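# End-to-end usage sketch (assumptions: `GenomeHandler` from this project's
# genomehandler.py; its exact constructor arguments may differ by version):
#
#   from devol import DEvol, GenomeHandler
#   from keras.datasets import mnist
#   from keras.utils import to_categorical
#
#   (x_train, y_train), (x_test, y_test) = mnist.load_data()
#   x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255
#   x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255
#   dataset = ((x_train, to_categorical(y_train)),
#              (x_test, to_categorical(y_test)))
#
#   genome_handler = GenomeHandler(max_conv_layers=4, max_dense_layers=2,
#                                  max_filters=128, max_dense_nodes=512,
#                                  input_shape=x_train.shape[1:], n_classes=10)
#   devol = DEvol(genome_handler)
#   model = devol.run(dataset, num_generations=10, pop_size=20, epochs=1)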
|
{
"content_hash": "e56ec334fadd06bbdf27a5e43593d3ba",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 104,
"avg_line_length": 36.92657342657343,
"alnum_prop": 0.5544929457437743,
"repo_name": "joeddav/devol",
"id": "0eab94655442cae6352f2148c64e53e04eec46ad",
"size": "10561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devol/devol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3805"
},
{
"name": "Python",
"bytes": "24566"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import argparse
import sys
import os.path
import csv
import random
csv_file_name = "train.csv"
test_csv_file_name = "test.csv"
train_file_name = "mnist_rnn_train.csv"
test_file_name = "mnist_rnn_test.csv"
submit_test_file_name = "test_28000.csv"
submit_result_file_name = "submission_sijiangdu.csv"
# set random seed for comparing the two result calculations
tf.set_random_seed(1)
# load the MNIST dataset via the tutorial helper (one-hot labels)
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# hyperparameters
csv_size = 42000
train_size = 40000
test_size = 2000
epochs = 8
training_iters = train_size * epochs
batch_size = 100
sample_size = 28*28
n_cell= 400
n_steps = 28  # time steps (one image row per step)
n_classes = 10 # MNIST classes (0-9 digits)
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_steps, 28 * 28 // n_steps])
y = tf.placeholder(tf.float32, [None, n_classes])
def RNN_Wrapper(X, num_classes, n_hidden_units, forget_bias = 1.0, name='Basic_LSTM'):
with tf.name_scope(name):
W = tf.Variable(tf.truncated_normal([n_hidden_units, num_classes],stddev=0.1))
b = tf.Variable(tf.zeros([num_classes]))
cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_units, forget_bias = forget_bias)
init_state = cell.zero_state(batch_size, dtype=tf.float32)
        # Each example spans n_steps time steps (not sample_size pixels).
        outputs, final_state = tf.nn.dynamic_rnn(
            cell, X, sequence_length=[n_steps for i in range(batch_size)],
            initial_state=init_state, time_major=False)
outputs = tf.unstack(tf.transpose(outputs, [1,0,2]))
results = tf.matmul(outputs[-1], W) + b
return results
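# Shape sketch: each flat 784-pixel image is reshaped to
# (n_steps, 784 // n_steps) = (28, 28), so the LSTM reads one 28-pixel row
# per time step; the logits come from the final step's output through the
# (n_hidden_units x num_classes) projection defined above.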
# partition: randomly partition the input file into two files, e.g.
# partition=0.8 sends 80% of rows to training and 20% to testing.
def csv_partition_train_test(input_file, partition=0.952):
global csv_size, train_size, test_size,training_iters
csv_size = 0
train_size = 0
test_size = 0
with open(input_file) as data:
with open(FLAGS.data_dir+test_file_name, 'w+') as test:
with open(FLAGS.data_dir+train_file_name, 'w+') as train:
header = next(data)
test.write(header)
train.write(header)
csv_r = csv.reader(data)
csv_w_train = csv.writer(train)
csv_w_test = csv.writer(test)
for line in csv_r:
csv_size += 1
                    if len(line) != 785:
print("Invalid CSV format. Discard record #%s"%(csv_size))
continue
if random.random() < partition:
csv_w_train.writerow(line)
train_size += 1
else:
csv_w_test.writerow(line)
test_size += 1
training_iters = train_size * epochs
print("CSV input size = %s, train set size = %s, validation set size = %s"%(csv_size , train_size,test_size))
# add a dummy label column to test.csv so rows match the training format
def csv_test_csv_file_change(input_file, output_file):
with open(input_file) as data:
with open(FLAGS.data_dir+output_file, 'w+') as out_file:
header = next(data)
out_file.write("label,"+header)
csv_r = csv.reader(data)
csv_w = csv.writer(out_file)
size = 0
for line in csv_r:
size += 1
line = [-1] + line
                if len(line) != 785:
print("Invalid test.csv. Discard record #%s"%(size))
continue
csv_w.writerow(line)
print("test.csv input size = %s"%(size))
def read_mnist_csv(filename_queue):
reader = tf.TextLineReader(skip_header_lines=1)
key, value = reader.read(filename_queue)
record_defaults = [[0]for row in range(785)]
cols = tf.decode_csv( value, record_defaults=record_defaults)
features = tf.stack(cols[1:])
label = tf.stack([cols[0]])
return features, label
def input_pipeline(filenames, batch_size, num_epochs=None):
print(filenames)
filename_queue = tf.train.string_input_producer(
filenames, num_epochs=num_epochs, shuffle=True)
features, label = read_mnist_csv(filename_queue)
# min_after_dequeue defines how big a buffer we will randomly sample
# from -- bigger means better shuffling but slower start up and more
# memory used.
# capacity must be larger than min_after_dequeue and the amount larger
# determines the maximum we will prefetch. Recommendation:
# min_after_dequeue + (num_threads + a small safety margin) * batch_size
min_after_dequeue = 10000
capacity = min_after_dequeue + 3 * batch_size
feature_batch, label_batch = tf.train.shuffle_batch(
[features, label], batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue)
return feature_batch, label_batch
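# Worked numbers for the comment above: with min_after_dequeue = 10000 and
# batch_size = 100, capacity = 10000 + 3 * 100 = 10300 buffered CSV rows.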
# display image
show_num = 3
fig_mnist, ax_array = plt.subplots(show_num,show_num)
def show_mnist(images,labels,title = "Digits"):
global fig_mnist, ax_array
plt.figure(fig_mnist.number)
fig_mnist.suptitle(title)
n = len(images)
z = np.zeros((28,28))
t = [[i] for i in range(10)]
for i in range(show_num*show_num):
row = int(i/show_num)
col = int(i%show_num)
if i<n:
img = images[i].reshape(28,28)
ax_array[row,col].imshow(img, cmap=cm.binary)
ax_array[row, col].set_title(int(labels[i]))
ax_array[row, col].axis('off')
else:
ax_array[row, col].imshow(z, cmap=cm.binary)
ax_array[row, col].set_title('')
ax_array[row, col].axis('off')
plt.draw()
plt.pause(0.3)
def train():
if not os.path.exists(FLAGS.data_dir+train_file_name)\
or not os.path.exists(FLAGS.data_dir+test_file_name):
csv_partition_train_test(csv_file_name)
if not os.path.exists(FLAGS.data_dir+submit_test_file_name):
csv_test_csv_file_change(test_csv_file_name, submit_test_file_name)
pred = RNN_Wrapper(x, n_classes, n_cell, FLAGS.forget_bias)
with tf.name_scope('train'):
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
train_op = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(cost)
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
with tf.name_scope('classification'):
classification = tf.argmax(pred, 1)
tf.summary.scalar('accuracy', accuracy)
with tf.name_scope('input_images'):
example_batch_train, label_batch_train = input_pipeline(tf.constant([FLAGS.data_dir+train_file_name]), batch_size)
example_batch_test, label_batch_test = input_pipeline(tf.constant([FLAGS.data_dir+test_file_name]), batch_size)
example_batch_submit, label_batch_submit = input_pipeline(tf.constant([FLAGS.data_dir+submit_test_file_name]), batch_size,1)
test_by_mnist_lib = False
train_by_mnist_lib = True
def feed_dict(train, submit=False):
"""Make a TensorFlow feed_dict: maps data onto Tensor placeholders."""
if train:
            if train_by_mnist_lib:
xs, ys = mnist.train.next_batch(batch_size)
xs = xs.reshape([batch_size, n_steps, -1])
return {x: xs, y: ys}
xs, ys_label = sess.run([example_batch_train, label_batch_train])
else:
if submit:
xs, ys_label = sess.run([example_batch_submit, label_batch_submit])
else:
                if test_by_mnist_lib:
xs, ys = mnist.test.next_batch(batch_size)
xs = xs.reshape([batch_size, n_steps, -1])
return {x: xs, y: ys}
xs, ys_label = sess.run([example_batch_test, label_batch_test])
        xs = xs.reshape([batch_size, n_steps, -1])
        # Normalize CSV pixel values for all batches, including submission
        # batches, to match the [0, 1] range of the mnist library data.
        xs = xs / 255
        n = ys_label.shape[0]
        ys = np.zeros((n, 10))
        if not submit:
            for i in range(n):
                ys[i][int(ys_label[i])] = 1
        return {x: xs, y: ys}
sess = tf.InteractiveSession()
with tf.name_scope('training_epoch'):
# Merge all the summaries and write them out to /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
# Start input enqueue threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
step = 0
acc = 0
# plotting
fig1, ax = plt.subplots(1,1)
plt.ylim((-0.5, 1.5))
plt.ylabel('Accuracy')
    plt.xlabel('Training Step (batch size: ' + str(int(batch_size)) + ')')
ax.text(0.05, 0.90,'Max Time -Steps in RNN cell: ' + str(n_steps), ha='left', va='center', color='blue', transform=ax.transAxes)
ax.text(0.05, 0.85,'Number of Units in RNN Cell: ' + str(n_cell), ha='left', va='center', color='blue', transform=ax.transAxes)
text_acc = ax.text(0.65, 0.90,'Accuracy: ' + str(acc), ha='left', va='center', color='green', transform=ax.transAxes)
# ax.text(0.65, 0.85, str(mnist.test.labels.shape[0])+ ' Samples', ha='left', va='center', color='green', transform=ax.transAxes)
fig1.suptitle('Tensorflow RNN BasicLSTMCell - MNIST Digit Recognizer')
plt.draw()
plt.pause(0.3)
try:
# while step * batch_size < training_iters:
submit_file = open(FLAGS.data_dir+submit_result_file_name, 'w+')
submit_file.write("ImageId,Label\r\n")
csv_w_submit = csv.writer(submit_file)
submit_size = 0
# training_iters = 3000
while not coord.should_stop():
#generate output submission file
if step * batch_size > training_iters:
feed_d = feed_dict(False,True)
[digits] = sess.run([classification], feed_d)
for i in digits:
submit_size += 1
line = [str(submit_size),str(i)]
csv_w_submit.writerow(line)
                    if submit_size % 1000 == 0:
print(submit_size)
show_mnist(feed_d[x],digits, "Outputs to submission file")
continue
#training
sess.run([train_op], feed_dict=feed_dict(True))
step += 1
#testing and plotting training progress
test_at = 100
if step % test_at == 0:
tmp = acc
                # holders for misclassified images; the blank seed row is dropped below
s = np.zeros((1,28*28))
d = np.zeros(1)
test_loop = 100
acc_l = [0.0]*test_loop
for i in range(test_loop):
feed_d=feed_dict(False)
acc_l[i],summary,digits = sess.run([accuracy,merged,classification], feed_dict=feed_d)
train_writer.add_summary(summary, step)
                    # show the images that are incorrectly classified
if len(s) > show_num*show_num: continue
correct = np.argmax(feed_d[y],1)
for i in range(batch_size):
if correct[i] != digits[i]:
s = np.append(s,np.array([feed_d[x][i].flatten()]),0)
d = np.append(d,np.array([digits[i]]),0)
                acc = np.mean(acc_l)
                show_mnist(s[1:], d[1:], "Incorrect Classifications")
print(acc)
plt.figure(fig1.number)
plt.plot([step-test_at,step], [tmp, acc],'g')
text_acc.set_text('Accuracy: ' + str(acc))
plt.draw()
plt.savefig(FLAGS.result_dir+'/mnist_rnn_LSTM'+'.png')
plt.pause(0.3)
except tf.errors.OutOfRangeError:
print('Done training and testing -- epoch limit reached')
finally:
# When done, ask the threads to stop.
coord.request_stop()
submit_file.close()
if not coord.should_stop():
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
    result_str = str(int(acc * 1000)) + '_step' + str(n_steps) + '_cell' + str(n_cell) + '_b' + str(step)
plt.figure(fig1.number)
plt.savefig(FLAGS.result_dir+'/mnist_rnn_LSTM'+result_str+'.png')
train_writer.close()
def main(_):
if tf.gfile.Exists(FLAGS.log_dir):
tf.gfile.DeleteRecursively(FLAGS.log_dir)
tf.gfile.MakeDirs(FLAGS.log_dir)
if tf.gfile.Exists(FLAGS.data_dir):
tf.gfile.DeleteRecursively(FLAGS.data_dir)
if not tf.gfile.Exists(FLAGS.data_dir):
tf.gfile.MakeDirs(FLAGS.data_dir)
if not tf.gfile.Exists(FLAGS.result_dir):
tf.gfile.MakeDirs(FLAGS.result_dir)
train()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', type=float, default=0.005,
help='Initial learning rate')
parser.add_argument('--forget_bias', type=float, default= 0.9,
help='forget bias for training')
parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist_rnn/input_data/',
help='Directory for storing input data')
parser.add_argument('--log_dir', type=str, default='/tmp/tensorflow/mnist_rnn/logs/mnist_rnn_with_summaries',
help='Summaries log directory')
parser.add_argument('--result_dir', type=str, default='/tmp/tensorflow/mnist_rnn/result',
help='result plotting PNG files directory')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
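# Example invocation (paths and flag values are illustrative; the defaults
# above apply when flags are omitted):
#   python mnist_rnn_kaggles_cmpt.py --learning_rate 0.005 --forget_bias 0.9 \
#       --data_dir /tmp/tensorflow/mnist_rnn/input_data/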
|
{
"content_hash": "ca26e7fa1472cb1e912fdc2622bad710",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 154,
"avg_line_length": 40.19047619047619,
"alnum_prop": 0.5844717033732925,
"repo_name": "sijiangdu/Tensorflow",
"id": "45a8dc4c861d5a52f2b81e332f2f90434b8ab187",
"size": "15082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mnist_rnn/mnist_rnn_kaggles_cmpt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "77258"
}
],
"symlink_target": ""
}
|
"""
`Unit tests for cargo.builder.create_type`
--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--
2016 Jared Lunde © The MIT License (MIT)
http://github.com/jaredlunde
"""
import unittest
import psycopg2
from vital.security import randkey
from cargo import ORM, db, fields, Function, Clause
from cargo.builder import create_type
from cargo.builder.types import Type
def new_field(type='text', value=None, name=None, table=None):
field = getattr(fields, type.title())(value=value)
field.field_name = name or randkey(24, keyspace='aeioughrstlnmy')
field.table = table or randkey(24, keyspace='aeioughrstlnmy')
return field
class TestCreateType(unittest.TestCase):
orm = ORM()
def test_create(self):
attrs = ((new_field(), Clause('COLLATE', 'LATIN1')),
(new_field(), Clause('COLLATE', 'LATIN1')),)
type = create_type(self.orm, 'foo', attrs=attrs, dry=True)
print(type.query.mogrified)
type = Type(self.orm, 'foo')
type.options(input=Function('foo_in').func,
output=Function('foo_out').func,
internallength=16,
element='float4')
print(type.query)
print(type.query.mogrified)
type = Type(self.orm,
'foo',
Clause('PASSEDBYVALUE'),
input=Function('foo_in').func,
output=Function('foo_out').func,
internallength=16,
element='float4')
print(type.query)
print(type.query.mogrified)
if __name__ == '__main__':
# Unit test
unittest.main()
|
{
"content_hash": "110fd3bacf3c69cba835cb21e2836303",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 80,
"avg_line_length": 31.296296296296298,
"alnum_prop": 0.5568047337278107,
"repo_name": "jaredlunde/cargo-orm",
"id": "3ed465f7099b070cfcc46cf21851df99211c8e6c",
"size": "1763",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "unit_tests/builders/create_type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1155740"
},
{
"name": "Shell",
"bytes": "288"
}
],
"symlink_target": ""
}
|