content stringlengths 5 1.05M |
|---|
def sequence_term(k):
    """Return the k-th term of the series k**3 + 2*k."""
    return k ** 3 + 2 * k


def main():
    """Prompt for a term count and print terms 1..n, space-separated.

    The original flat script kept a running `l = 0` that was never used
    before being overwritten each iteration; the term computation is now
    a testable helper and the loop is a range instead of a manual
    while/increment.
    """
    n = int(input("Enter number of terms"))
    for k in range(1, n + 1):
        print(sequence_term(k), end=" ")


if __name__ == "__main__":
    main()
|
import json
class Location(object):
    """A single named location parsed from one raw JSON record."""

    def __init__(self, d):
        # Bosses carry no 'Id' key, so fall back to None for them.
        self.id = d.get('Id')
        self.room = d['Room']
        self.name = d['Name']
        self.area = d['Area']
        # Tracks whether this location has been seen yet.
        self.visited = False
class Locations(object):
    """Index a list of raw location dicts by id, by name, and by room."""

    def __init__(self, raw_locations):
        self.locations = [Location(loc) for loc in raw_locations]
        self.by_id = {loc.id: loc for loc in self.locations}
        self.by_name = {loc.name: loc for loc in self.locations}
        # Several locations may share a room, so a room maps to a list.
        self.by_room = {}
        for loc in self.locations:
            self.by_room.setdefault(loc.room, []).append(loc)

    @staticmethod
    def read(filename):
        """Load a Locations index from a JSON file.

        BUGFIX: the original `json.load(open(filename))` left the file
        handle open until garbage collection; a context manager closes
        it deterministically.
        """
        with open(filename) as fh:
            return Locations(json.load(fh))

    def by_name_or_room(self, name_or_room):
        """Resolve a location name to [Location], or a room to its list.

        Returns None when the key matches neither a name nor a room.
        """
        loc = self.by_name.get(name_or_room)
        if loc:
            return [loc]
        locs = self.by_room.get(name_or_room)
        if locs:
            return locs
        return None
|
# NOTE(review): this script uses TensorFlow 1.x APIs (tf.Session,
# tf.placeholder, tf.assign, tf.train.GradientDescentOptimizer); under
# TensorFlow 2.x these require tf.compat.v1 and disabling eager
# execution -- confirm the intended TF version before running.
import tensorflow as tf
# construct nodes
NODE1 = tf.constant(3.0, dtype=tf.float32)
NODE2 = tf.constant(4.0)
print('NODE1, NODE2:', NODE1, NODE2)
# run nodes in session
SESS = tf.Session()
print('SESS.run(NODE1, NODE2):', SESS.run([NODE1, NODE2]))
# add combo node
NODE3 = tf.add(NODE1, NODE2)
print('NODE3:', NODE3)
print('SESS.run(NODE3):', SESS.run(NODE3))
# add placeholders and a node for adding
A = tf.placeholder(tf.float32)
B = tf.placeholder(tf.float32)
ADDER_NODE = A + B
print(SESS.run(ADDER_NODE, {A: 3, B: 4.5}))
# lists are added element-wise
print(SESS.run(ADDER_NODE, {A: [1, 3], B: [2, 4]}))
# add a node for tripling
ADD_AND_TRIPLE = ADDER_NODE * 3
print(SESS.run(ADD_AND_TRIPLE, {A: 3, B: 4.5}))
# add variables for linear model: LINEAR = SLOPE * VAL + INITIAL
SLOPE = tf.Variable([.3], dtype=tf.float32)
INITIAL = tf.Variable([-.3], dtype=tf.float32)
VAL = tf.placeholder(tf.float32)
LINEAR = SLOPE * VAL + INITIAL
# initialize variables (tf.Variable holds no value until this runs)
INIT = tf.global_variables_initializer()
SESS.run(INIT)
print(SESS.run(LINEAR, {VAL: [1, 2, 3, 4]}))
# linear regression loss model: sum of squared prediction deltas
RESULT = tf.placeholder(tf.float32)
DELTAS_SQR = tf.square(LINEAR - RESULT)
LOSS = tf.reduce_sum(DELTAS_SQR)
print(SESS.run(LOSS, {VAL: [1, 2, 3, 4], RESULT: [0, -1, -2, -3]}))
# change slope and initial to the exact solution for these targets
# (slope -1, intercept 1 reproduces [0,-1,-2,-3], so loss prints 0)
TRUE_SLOPE = tf.assign(SLOPE, [-1.])
TRUE_INITIAL = tf.assign(INITIAL, [1.])
SESS.run([TRUE_SLOPE, TRUE_INITIAL])
print(SESS.run(LOSS, {VAL: [1, 2, 3, 4], RESULT: [0, -1, -2, -3]}))
# add gradient descent optimizer trainer
OPTIMIZER = tf.train.GradientDescentOptimizer(0.01)
TRAIN = OPTIMIZER.minimize(LOSS)
# run trainer
# re-initialize so training starts from the original variable values
SESS.run(INIT)
for i in range(1000):
    SESS.run(TRAIN, {VAL: [1, 2, 3, 4], RESULT: [0, -1, -2, -3]})
print(SESS.run([SLOPE, INITIAL]))
# evaluate accuracy
CURR_SLOPE, CURR_INITIAL, CURR_LOSS = SESS.run([SLOPE, INITIAL, LOSS], {VAL: [1, 2, 3, 4], RESULT: [0, -1, -2, -3]})
print("Slope: %s Initial: %s Loss: %s" % (CURR_SLOPE, CURR_INITIAL, CURR_LOSS))
|
from django.forms import ModelForm
from .models import Case
class CaseForm(ModelForm):
    """ModelForm exposing every field of the Case model.

    Labels are the Chinese UI strings for title / fault description /
    affected projects.
    """

    class Meta:
        model = Case
        fields = '__all__'
        # Keys must match Case model field names; values are the
        # verbose labels rendered on the form.
        labels = {
            'title': '标题',
            'description': '故障描述',
            'projects': '影响项目'
        }
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script for Cinema4D which pulls in a folder of terrain .obj files and sets proper
# metadata on each tile.
import os
import unicodedata
import c4d
from c4d import documents, storage, bitmaps
# Constants
colladaExportTagId = 1039717
# Relevant COLLADA Export tag ids
COLLADA_EXPORT_SETTINGS_CUSTOM_DATA = 1021
class TileTexInfo:
    """Per-tile texture metadata collected from the tile's JPEG variants."""

    def __init__( self, id, baseSize, xsmPrefix, smPrefix, hasMediumSize, hasLargeSize ):
        self.tileID = id
        self.baseSize = baseSize
        self.xsmPrefix = xsmPrefix
        self.smPrefix = smPrefix
        self.hasMediumSize = hasMediumSize
        self.hasLargeSize = hasLargeSize

    def toString( self ):
        """Serialize the tile info into the COLLADA tag's custom-data string."""
        # Boolean flags are rendered as '1'/'0' to keep the tag compact.
        mediumFlag = '1' if self.hasMediumSize else '0'
        largeFlag = '1' if self.hasLargeSize else '0'
        return ( 'TYPE TILE, ID ' + self.tileID + ', SIZE ' + str( self.baseSize ) +
                 ', XSM ' + self.xsmPrefix + ', SM ' + self.smPrefix +
                 ', MD ' + mediumFlag + ', LG ' + largeFlag )
def main():
    """Import terrain tile meshes and tag them with COLLADA export metadata.

    Workflow: ask the user for a tile directory, gather per-tile texture
    info from the JPEG size variants (xsm/sm/base/lg), merge every tile
    .obj plus the background and simple-collision meshes into the active
    document, and stamp each merged object with a COLLADA Export tag
    carrying its texture metadata string.
    """
    c4d.StopAllThreads()
    tileTexInfoDict = {}
    # Get a path to load the tile meshes from
    directoryPath = c4d.storage.LoadDialog( title = 'Select terrain tile directory', flags = c4d.FILESELECT_DIRECTORY )
    # Exit if the user cancels the load dialog.
    # BUGFIX: this check must happen before decode(); the original decoded
    # first, raising AttributeError on None when the dialog was cancelled.
    if directoryPath is None:
        return
    decodedDirPath = directoryPath.decode( 'utf-8' )
    # Get a list of all files in the selected directory
    dirList = [ os.path.normcase( f ) for f in os.listdir( decodedDirPath ) ]
    # Get a list of all .obj files in the selected directory
    objList = [ os.path.join( decodedDirPath, f ) for f in dirList
                if os.path.splitext( f )[ 1 ] == '.obj' and isTileMesh( f ) ]
    # Exit if no obj files are found.
    # BUGFIX: use == rather than 'is 0' (identity only works via CPython's
    # small-int caching and is flagged by linters).
    if len( objList ) == 0:
        return
    # Get a list of all _xsm prefixed .jpg files
    xsmallJPEGList = [ os.path.join( decodedDirPath, f ) for f in dirList
                       if os.path.splitext( f )[ 1 ] == '.jpg' and '_xsm' in os.path.splitext( f )[ 0 ] and isTileMesh( f ) ]
    # Get a list of all _sm prefixed .jpg files
    smallJPEGList = [ os.path.join( decodedDirPath, f ) for f in dirList
                      if os.path.splitext( f )[ 1 ] == '.jpg' and '_sm' in os.path.splitext( f )[ 0 ] and isTileMesh( f ) ]
    # Get a list of all unprefixed .jpg files
    baseJPEGList = [ os.path.join( decodedDirPath, f ) for f in dirList
                     if os.path.splitext( f )[ 1 ] == '.jpg' and '_' not in os.path.splitext( f )[ 0 ] and isTileMesh( f ) ]
    # Get a list of all _lg prefixed .jpg files
    largeJPEGList = [ os.path.join( decodedDirPath, f ) for f in dirList
                      if os.path.splitext( f )[ 1 ] == '.jpg' and '_lg' in os.path.splitext( f )[ 0 ] and isTileMesh( f ) ]
    # Set up TileTexInfo objects.
    # NOTE(review): the four lists are indexed in lockstep, assuming one
    # xsm/sm/lg JPEG exists per base JPEG and that os.listdir ordering
    # aligns them -- confirm the naming scheme guarantees this.
    for i in range( 0, len( baseJPEGList ) ):
        # Resolve the four size-variant filenames for this tile
        xsmFilenameASCII = unicodeToASCII( xsmallJPEGList[ i ] )
        smFilenameASCII = unicodeToASCII( smallJPEGList[ i ] )
        baseFilename = baseJPEGList[ i ]
        baseFilenameASCII = unicodeToASCII( baseFilename )
        largeFilenameASCII = unicodeToASCII( largeJPEGList[ i ] )
        # Get xsmall jpg dimensions (width only; tiles are assumed square
        # by this measurement -- confirm)
        xsmBitmap = c4d.bitmaps.BaseBitmap()
        xsmBitmap.InitWith( xsmFilenameASCII )
        xsmSize = xsmBitmap.GetSize()[ 0 ]
        # Get small jpg dimensions
        smBitmap = c4d.bitmaps.BaseBitmap()
        smBitmap.InitWith( smFilenameASCII )
        smSize = smBitmap.GetSize()[ 0 ]
        # Get base jpg dimensions
        baseBitmap = c4d.bitmaps.BaseBitmap()
        baseBitmap.InitWith( baseFilenameASCII )
        baseSize = baseBitmap.GetSize()[ 0 ]
        # Get large jpg dimensions
        largeBitmap = c4d.bitmaps.BaseBitmap()
        largeBitmap.InitWith( largeFilenameASCII )
        largeSize = largeBitmap.GetSize()[ 0 ]
        xsmPrefix = 'xsm'
        smPrefix = 'sm'
        hasMediumSize = True
        hasLargeSize = False
        # Swap xsm and sm prefixes if sm is actually smaller than xsm
        if smSize < xsmSize:
            xsmPrefix = 'sm'
            smPrefix = 'xsm'
        # No distinct medium size when the base duplicates a smaller size
        if baseSize == smSize or baseSize == xsmSize:
            hasMediumSize = False
        if largeSize > baseSize:
            hasLargeSize = True
        # The tile ID is the base JPEG's filename without extension
        basename = os.path.basename( baseFilename )
        tileID = os.path.splitext( basename )[ 0 ]
        tileTexInfoDict[ tileID ] = TileTexInfo( tileID, baseSize, xsmPrefix, smPrefix, hasMediumSize, hasLargeSize )
    # Merge all tile meshes.
    # NOTE(review): 'doc' is not defined in this file; it is presumably the
    # active-document global provided by the C4D scripting environment.
    for filename in objList:
        # Convert unicode filename to ASCII
        asciiFilename = unicodeToASCII( filename )
        # Get tile ID from the filename
        basename = os.path.basename( filename )
        tileID = os.path.splitext( basename )[ 0 ]
        if c4d.documents.MergeDocument( doc, asciiFilename, c4d.SCENEFILTER_OBJECTS | c4d.SCENEFILTER_MATERIALS | c4d.SCENEFILTER_MERGESCENE ) is True:
            # The merged mesh becomes the document's first object
            newTile = doc.GetFirstObject()
            newTile.SetName( tileID )
            newTag = newTile.MakeTag( colladaExportTagId )
            tagData = newTag.GetDataInstance()
            customData = tileTexInfoDict[ tileID ].toString()
            tagData.SetString( COLLADA_EXPORT_SETTINGS_CUSTOM_DATA, customData )
    # Get the background mesh path
    backgroundPath = os.path.join( decodedDirPath, 'background.obj' )
    backgroundPathASCII = unicodeToASCII( backgroundPath )
    # Merge the background mesh
    if c4d.documents.MergeDocument( doc, backgroundPathASCII, c4d.SCENEFILTER_OBJECTS | c4d.SCENEFILTER_MATERIALS | c4d.SCENEFILTER_MERGESCENE ) is True:
        newBackground = doc.GetFirstObject()
        newBackground.SetName( 'BACKGROUND' )
        backgroundTag = newBackground.MakeTag( colladaExportTagId )
        tagData = backgroundTag.GetDataInstance()
        tagData.SetString( COLLADA_EXPORT_SETTINGS_CUSTOM_DATA, 'TYPE BACKGROUND' )
    # Get the simple collision mesh path
    collisionPath = os.path.join( decodedDirPath, 'simple.obj' )
    collisionPathASCII = unicodeToASCII( collisionPath )
    # Merge the collision mesh
    if c4d.documents.MergeDocument( doc, collisionPathASCII, c4d.SCENEFILTER_OBJECTS | c4d.SCENEFILTER_MATERIALS | c4d.SCENEFILTER_MERGESCENE ) is True:
        newCollision = doc.GetFirstObject()
        newCollision.SetName( 'SIMPLE' )
        collisionTag = newCollision.MakeTag( colladaExportTagId )
        tagData = collisionTag.GetDataInstance()
        tagData.SetString( COLLADA_EXPORT_SETTINGS_CUSTOM_DATA, 'TYPE SIMPLE' )
def unicodeToASCII( text ):
    """Return *text* reduced to an ASCII byte string.

    NFKD-decomposes the string so accented characters split into a base
    character plus combining marks, then encodes to ASCII, silently
    dropping whatever cannot be represented.

    BUGFIX: the parameter was named 'str', shadowing the builtin; renamed
    (all call sites in this file pass it positionally).
    """
    return unicodedata.normalize( 'NFKD', text ).encode( 'ascii', 'ignore' )
def isTileMesh( f ):
    """Return True unless the filename (without extension) is one of the
    special non-tile meshes ('simple' collision or 'background')."""
    stem = os.path.splitext( f )[ 0 ]
    return stem not in ( 'simple', 'background' )
# Run the importer only when executed as a script inside Cinema4D.
if __name__ == '__main__':
    main()
import os
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command that renames a Django project in place."""

    help = 'Renames a Django project'

    def add_arguments(self, parser):
        # Both arguments are positional; only the first value of each
        # nargs='+' list is actually consumed by handle().
        parser.add_argument('current', type=str, nargs='+',
                            help='The current Django project folder name')
        parser.add_argument('new', type=str, nargs='+',
                            help='The new Django project name')

    def handle(self, *args, **kwargs):
        old_name = kwargs['current'][0]
        new_name = kwargs['new'][0]
        # Rewrite every occurrence of the old project name inside the
        # files that embed it.
        for path in [f'{old_name}/wsgi.py', 'manage.py']:
            with open(path, 'r') as fh:
                contents = fh.read()
            with open(path, 'w') as fh:
                fh.write(contents.replace(old_name, new_name))
        # Finally rename the project package directory itself.
        os.rename(old_name, new_name)
        self.stdout.write(self.style.SUCCESS(
            'Project has been renamed to %s' % new_name))
|
"""
Drop-in replacement RPC Client for REST over XML-RPC.
Usage is to just change a usual import like this:
from xmlrpc.client import ServerProxy
To this:
from restxrpc.rpcclient import ServerProxy
Additionally, set the URL for XML-RPC specs download, e.g.:
ServerProxy.set_specs_url("http://myhost:8080/xmlrpcspecs")
Author: bo@suse.de
"""
from typing import Optional, Dict, Union, Any
import urllib.parse
import datetime
import requests
import yaml
import logging
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(asctime)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
class RESTCall:
    """
    REST call object.

    Accumulates an attribute path (e.g. ``proxy.system.listUsers``) and,
    when finally called, POSTs the mapped arguments to the corresponding
    REST URL.
    """
    # Maps XML-RPC spec type names to the Python types accepted for
    # the corresponding arguments.
    _TYPEMAP: Dict[str, Any] = {
        "string": str,
        "int": int,
        "bool": bool,
        "datetime": datetime.datetime  # ??
    }

    def __init__(self, url: str, obj: str):
        """
        __init__ constructor

        :param url: root URL of the REST endpoint
        :type url: str
        :param obj: first path segment (the RPC object name)
        :type obj: str
        """
        self._root = url.strip("/")
        self._path = [obj]
        self._xmlrpc_spec = None

    def lock(self) -> None:
        """
        lock is a function that locks all the methods of RESTCall instance.

        Shadows every public method with an instance attribute of None so
        subsequent accesses to those names no longer resolve to the bound
        methods.
        """
        for method in RESTCall.__dict__.keys():
            if not method.startswith("_"):
                setattr(self, method, None)

    def set_spec_uri(self, uri: str) -> "RESTCall":
        """
        set_spec sets the XML-RPC spec URI to download it.

        :param uri: URI on the same server to the XML-RPC spec
        :type uri: str
        :return: RESTCall object
        :rtype: RESTCall
        """
        url: str = ""
        r: urllib.parse.ParseResult = urllib.parse.urlparse(uri)
        if not r.scheme:
            # Relative URN: resolve it against the root endpoint.
            _url = urllib.parse.urlparse(self._root)
            url = "{}://{}/{}".format(_url.scheme, _url.netloc, uri.strip("/"))
        else:
            url = uri
        r = urllib.parse.urlparse(url)
        if r.scheme == "file":
            assert bool(r.path), "Path to the file spec should be defined!"
            # BUGFIX: the original used yaml.load(open(path).read()) -- no
            # Loader argument (deprecated) and a leaked file handle; the
            # spec is plain data, so safe_load inside a context manager is
            # both safer and leak-free.
            with open(r.path) as fh:
                self._xmlrpc_spec = yaml.safe_load(fh).get("xmlrpc")
        else:
            assert bool(r.path), "URN to the spec endpoint should be defined!"
            self._xmlrpc_spec = requests.get(url).json().get("xmlrpc")
        assert bool(self._xmlrpc_spec), "Cannot create REST layer: No XML-RPC specs has been found!"
        self.lock()
        return self

    def _is_root(self) -> bool:
        """
        is_root tells if the node is Root node.

        :return: True if node is root.
        :rtype: bool
        """
        return self._root is None

    def __getattr__(self, attr):
        # Unknown attributes extend the call path and return self so
        # accesses can be chained (obj.ns.method).
        if attr not in self.__dict__:  # or self.__dict__[attr] is None:
            self._path.append(attr)
        return self

    def _get_uri(self) -> str:
        """
        _get_uri get URI path.

        :return: URI
        :rtype: str
        """
        return "/".join(self._path).strip("/")

    def _get_namespace(self) -> str:
        """
        _get_namespace get namespace path of the current function

        :return: XML-RPC namespace.
        :rtype: str
        """
        return ".".join(self._path).strip(".")

    def _get_func_spec(self) -> Dict:
        """
        _get_func_spec get path as an XMl-RPC namespace and resolves function spec

        :return: the spec entry matching the current namespace (empty if none)
        :rtype: Dict
        """
        spec: Dict = {}
        ns: str = self._get_namespace()
        spec_ns: dict
        if self._xmlrpc_spec:
            for spec_ns in self._xmlrpc_spec:
                if ns in spec_ns.keys():
                    spec = spec_ns[ns]
                    break
        return spec

    def _map_parameters(self, *args) -> Dict:
        """
        _map_parameters maps parameters of the XML-RPC to the REST according to the spec.

        :return: Data mapping
        :rtype: Dict
        """
        spec = self._get_func_spec()
        assert len(spec) == len(args), "Invalid parameters for the function {}: given: {}, expected: {}".format(
            self._get_namespace(), args, ", ".join(['"{}" ({})'.format(list(item.keys())[0],
                                                                       list(item.values())[0]) for item in spec]))
        kwargs = {}
        for arg_ntp, arg_val in zip(spec, args):
            # Each spec entry is a single-item mapping {name: typename}.
            arg_name, arg_type = tuple(arg_ntp.items())[0]
            e_arg_type = self._TYPEMAP.get(arg_type, str)
            assert isinstance(arg_val, e_arg_type), "Argument {} has wrong type. Expected {}, got {}.".format(
                arg_val, e_arg_type.__name__, type(arg_val).__name__
            )
            kwargs[arg_name] = arg_val
        return kwargs

    def __call__(self, *args, **kwargs):
        """
        __call__ performs an actual REST call via requests
        """
        url = "{}/{}".format(self._root, self._get_uri())
        data = self._map_parameters(*args)
        logging.debug("calling URL %s with data %s", url, data)
        return requests.post(url, data=data).json()
class ServerProxy:
    """
    REST client over XML-RPC.
    """
    SPEC_URN: str = "/xmlrpc/spec"
    SPEC_URL: Optional[str] = None

    def __init__(self, url: str, *args, **kwargs):
        """
        Create a proxy for the RPC endpoint at *url*.

        Extra positional/keyword arguments are accepted (and ignored) for
        drop-in compatibility with xmlrpc.client.ServerProxy.

        :param url: URL of the RPC endpoint.
        :type url: str
        """
        self._url = url

    @staticmethod
    def set_spec_url(url: str) -> None:
        """
        Set an alternative URL for the specs download (may be a local
        file:// URL or any other). When set, it takes precedence over
        the default URN.

        :param url: URL
        :type url: str
        """
        ServerProxy.SPEC_URL = url

    @staticmethod
    def set_spec_urn(urn: str) -> None:
        """
        Override the default spec URN ("/xmlrpc/spec").

        :param urn: URN of the URI to the RPC endpoint.
        :type urn: str
        """
        ServerProxy.SPEC_URN = urn

    def __getattr__(self, attr) -> RESTCall:
        # Every attribute access starts a fresh call chain, primed with
        # the XML-RPC spec (explicit URL wins over the default URN).
        target = ServerProxy.SPEC_URL or self.SPEC_URN
        return RESTCall(self._url, attr).set_spec_uri(target)


# Backward compatibility for xmlrpclib
Server = ServerProxy
|
#In[]
# coding=utf-8
# Notebook-style smoke test for Putil's COCOData unit: loads one sample
# and visualizes the image and one label channel.
import numpy as np
import Putil.base.logger as plog
# Configure project logging: stream handler, DEBUG level, shared format.
plog.PutilLogConfig.config_handler(plog.stream_method)
plog.PutilLogConfig.config_log_level(stream=plog.DEBUG)
plog.PutilLogConfig.config_format(plog.Format)
logger = plog.PutilLogConfig('TestCOCODataUnit').logger()
logger.setLevel(plog.DEBUG)
import Putil.data.coco as coco
from Putil.data.io_convertor import IOConvertorNoOp
# Fix the sampling seed so the run is reproducible.
coco.COCOData.set_seed(64)
# NOTE(review): hard-coded dataset root and result paths -- confirm they
# exist on the target machine before running.
coco_data = coco.COCOData('/data2/Public_Data/COCO/unzip_data/2017', coco.COCOData.Stage.Evaluate, './test/data/result/test_coco_data_unit', detection=True)
convertor = IOConvertorNoOp()
coco_data.set_convert_to_input_method(convertor)
data = coco_data[1]
#In[]:
import matplotlib.pyplot as plt
import numpy as np
import cv2
# data[0] appears to be the image and data[1] a multi-channel label
# map -- confirm against COCOData's conversion method.
print(data[0].shape)
print(data[1].shape)
plt.imshow(data[0])
plt.show()
# Display channel 6 of the label map as a grayscale image.
plt.imshow((data[1][:, :, 6] * 255).astype(np.uint8), cmap=plt.cm.gray)
print(cv2.resize(data[1][:, :, 6], (100, 200)).shape)
plt.show()
import pytest
import itertools
from functools import partial
import flax
import jax
import numpy as np
from numpy import testing
import jax.numpy as jnp
import jax.flatten_util
from jax.scipy.sparse.linalg import cg
import netket as nk
from netket.optimizer import qgt
from netket.optimizer.qgt import qgt_onthefly_logic as _sr_onthefly_logic
from .. import common
# Registry of QGT constructors under test, keyed by display name.
QGT_objects = {}
QGT_objects["JacobianPyTree"] = partial(qgt.QGTJacobianPyTree, diag_shift=0.00)
# Linear solvers to exercise against each QGT representation.
solvers = {}
solvers["svd"] = nk.optimizer.solver.svd
solvers["cholesky"] = nk.optimizer.solver.cholesky
solvers["LU"] = nk.optimizer.solver.LU
solvers["solve"] = nk.optimizer.solver.solve
# Parameter dtypes the variational state is parametrized over.
dtypes = {"float": float, "complex": complex}
@pytest.fixture(params=[pytest.param(dtype, id=name) for name, dtype in dtypes.items()])
def vstate(request):
    """Build a sampled RBM MCState on a 5-site spin-1/2 chain.

    Parametrized over the `dtypes` registry so every test using this
    fixture runs for both real and complex parameters.
    """
    N = 5
    hi = nk.hilbert.Spin(1 / 2, N)
    g = nk.graph.Chain(N)
    dtype = request.param
    ma = nk.models.RBM(
        alpha=1,
        dtype=dtype,
        hidden_bias_init=nk.nn.initializers.normal(),
        visible_bias_init=nk.nn.initializers.normal(),
    )
    vstate = nk.variational.MCState(
        nk.sampler.MetropolisLocal(hi),
        ma,
    )
    # Small stddev keeps initial parameters near zero; the fixed PRNG key
    # makes the fixture deterministic across runs.
    vstate.init_parameters(
        nk.nn.initializers.normal(stddev=0.001), seed=jax.random.PRNGKey(3)
    )
    # Draw samples so QGT construction has data to work with.
    vstate.sample()
    return vstate
@pytest.mark.parametrize(
    "qgt",
    [pytest.param(sr, id=name) for name, sr in QGT_objects.items()],
)
@pytest.mark.parametrize(
    "solver",
    [pytest.param(solver, id=name) for name, solver in solvers.items()],
)
def test_qgt_solve(qgt, vstate, solver, _mpi_size, _mpi_rank):
    """Smoke-test that S.solve runs for every QGT/solver combination.

    _mpi_size/_mpi_rank are fixtures presumably provided by the shared
    `common` module imported above -- confirm.
    """
    S = qgt(vstate)
    x, _ = S.solve(solver, vstate.parameters)
|
#!/usr/bin/env python
# * DS-Explorer is being developed for the TANGO Project: http://tango-project.eu
# * Copyright 2018 CETIC www.cetic.be
# * DS-Explorer is a free software: you can redistribute it and/or modify
# * it under the terms of the BSD 3-Clause License
# * Please see the License file for more information
import sys
import File_Parsing
import Report_parsing
import Command_Exec
# ROBUSTNESS: exit with a usage message instead of an IndexError when the
# yaml source path argument is missing.
if len(sys.argv) < 2:
    sys.exit('Usage: {} <path-to-yaml-files>'.format(sys.argv[0]))
sourcePath = sys.argv[1]  # Location of the yaml files
# Parse the yaml design-description files
a = File_Parsing.FileParse(sourcePath)
# Command line execution for compilation, build and implementation of design
b = Command_Exec.CommandExec(sourcePath)
# Report parsing and generation of consolidated report after implementation
c = Report_parsing.ReportParse(sourcePath)
|
# coding: spec
from photons_messages_generator import test_helpers as thp
from photons_messages_generator import errors
from delfick_project.errors_pytest import assertRaises
# noseOfYeti spec file (see the "# coding: spec" header): `describe` and
# `it` blocks are compiled into unittest-style classes/methods at import.
describe "Multiple":
    it "does not allow broken bytes":
        # A non-basic field ("[10]byte") occupying a partial byte must be
        # rejected by the generator.
        src = """
        packets:
          one:
            OnePacketExample:
              pkt_type: 1
              size_bytes: 2
              fields:
                - name: "Failure"
                  type: "[10]byte"
                  size_bits: 10
                - type: "reserved"
                  size_bits: 6
        """

        adjustments = """
        num_reserved_fields_in_frame: 3
        """

        msg = "Only basic types and reserved may be a partial byte"
        with assertRaises(errors.BadSizeBytes, msg, name="byte"):
            with thp.generate(src, adjustments):
                pass

    it "does not allow broken strings":
        # Same packet, but the field is flagged as a string type via the
        # adjustments; partial-byte strings must also be rejected.
        src = """
        packets:
          one:
            OnePacketExample:
              pkt_type: 1
              size_bytes: 2
              fields:
                - name: "Failure"
                  type: "[10]byte"
                  size_bits: 10
                - type: "reserved"
                  size_bits: 6
        """

        adjustments = """
        num_reserved_fields_in_frame: 3
        changes:
          OnePacketExample:
            fields:
              Failure:
                string_type: true
        """

        msg = "Only basic types and reserved may be a partial byte"
        with assertRaises(errors.BadSizeBytes, msg, name="string"):
            with thp.generate(src, adjustments):
                pass

    it "Treats size_bits 1 bool as a Bool":
        # A byte-sized bool becomes T.BoolInt; a single-bit bool becomes
        # a genuine T.Bool in the generated messages module.
        src = """
        packets:
          one:
            OnePacketExample:
              pkt_type: 1
              size_bytes: 2
              fields:
                - name: "ABool"
                  type: "bool"
                  size_bytes: 1
                - name: "ARealBool"
                  type: "bool"
                  size_bits: 1
                - type: "reserved"
                  size_bits: 1
                - type: "reserved"
                  size_bits: 6
        """

        adjustments = """
        num_reserved_fields_in_frame: 3
        """

        with thp.generate(src, adjustments) as output:
            expected_messages = """
            # fmt: off
            ########################
            ### ONE
            ########################

            class OneMessages(Messages):
                PacketExample = msg(1
                    , ("a_bool", T.BoolInt)
                    , ("a_real_bool", T.Bool)
                    , ("reserved4", T.Reserved(1))
                    , ("reserved5", T.Reserved(6))
                    )

            # fmt: on
            __all__ = ["OneMessages"]
            """

            output.assertFileContents("messages.py", expected_messages)
|
from jsonrpcbase import InvalidParamsError
from JobBrowserBFF.schemas.Schema import Schema, SchemaError
class Validation(object):
    """Validates RPC params, results, and config against named schemas."""

    def __init__(self, schema_dir=None, load_schemas=None):
        self.schema = Schema(schema_dir=schema_dir, load_schemas=load_schemas)

    def _validate_or_invalid_params(self, schema_key, data):
        """Validate *data*, translating SchemaError into InvalidParamsError.

        Extracted from four duplicated try/except blocks; the error
        payload shape is unchanged.
        """
        try:
            self.schema.validate(schema_key, data)
        except SchemaError as ex:
            raise InvalidParamsError(
                data={
                    "schema_error": ex.message,
                    "schema_path": ex.path,
                    "schema_value": ex.value,
                }
            )

    def validate_params(self, method_name, data):
        """Validate *data* against the "<method_name>_params" schema."""
        self._validate_or_invalid_params(method_name + "_params", data)

    def validate_result(self, method_name, data):
        """Validate *data* against the "<method_name>_result" schema.

        NOTE(review): raises InvalidParamsError even though this checks a
        result; kept as-is since callers may catch that exact type.
        """
        self._validate_or_invalid_params(method_name + "_result", data)

    def validate(self, schema_key, data):
        """Validate against an explicit schema key; SchemaError propagates."""
        try:
            self.schema.validate(schema_key, data)
        except SchemaError as ex:
            raise ex

    def validate_config(self, data):
        """Validate service configuration against the "config" schema."""
        self._validate_or_invalid_params("config", data)
|
from collections import deque
class DigraphCycle(object):
    """Finds a directed cycle in a digraph via topological peeling.

    Repeatedly removes vertices of indegree 0 (Kahn's algorithm); any
    vertex left with positive indegree lies on a cycle. The discovered
    cycle is exposed as ``self.cycle`` — a vertex list starting and
    ending at the same vertex — or ``None`` when the graph is acyclic.

    Expects *graph* to provide ``v`` (vertex count), ``get_indegree(v)``
    and ``get_siblings(v)`` (outgoing neighbors).
    """

    def __init__(self, graph):
        # Kahn peeling: strip vertices with no remaining incoming edges.
        indegrees = [graph.get_indegree(v) for v in range(graph.v)]
        queue = deque([v for v, indegree in enumerate(indegrees) if indegree == 0])
        while queue:
            v = queue.popleft()
            for w in graph.get_siblings(v):
                indegrees[w] -= 1
                if indegrees[w] == 0:
                    queue.append(w)
        # Record back-edges among the vertices that survived the peel.
        edge_to = [0 for _ in range(graph.v)]
        root = -1
        for v in range(graph.v):
            if indegrees[v] == 0:
                continue
            root = v
            for w in graph.get_siblings(v):
                # BUGFIX: the original tested indegrees[v] > 0, which is
                # always true at this point; only edges INTO still-cyclic
                # vertices should be recorded.
                if indegrees[w] > 0:
                    edge_to[w] = v
        # BUGFIX: the original built the cycle into a local variable and
        # discarded it; expose the result as an attribute.
        if root == -1:
            self.cycle = None
            return
        cycle = []
        v = root
        while True:
            cycle.append(v)
            v = edge_to[v]
            if v == root:
                break
        cycle.append(v)
        self.cycle = cycle
|
# **********************************************************************************************************************
# FileName:
# bp_test.py
#
# Description:
# Boilerplate generator for LLD testing files
#
# Usage Examples:
# N/A
#
# 2020 | Brandon Braun | brandonbraun653@gmail.com
# **********************************************************************************************************************
from generators.lld_templates.bp_common import BaseAttributes
class LLDTestEntryTemplate(BaseAttributes):
    """Generates starter source data for an LLD test entry file."""

    @property
    def has_header_file(self):
        # Entry files are source-only.
        return False

    @property
    def has_source_file(self):
        return True

    @property
    def include_guard(self):
        return None

    def generate_header_filename(self):
        return None

    def generate_header_text(self):
        return None

    def generate_source_filename(self):
        return "test_entry_lld_{}.cpp".format(self.driver.lower())

    def generate_source_text(self):
        """Render the entry-point source file from the module template."""
        return self.__module_template().format(
            periph_lc=self.driver.lower(), periph_uc=self.driver.upper(),
            year=self.year, author=self.author, email=self.email,
            include_guard=self.include_guard)

    @staticmethod
    def __module_template():
        return \
"""/********************************************************************************
 *  File Name:
 *    test_entry_lld_{periph_lc}.cpp
 *
 *  Description:
 *    Entry into the test suite for LLD {periph_uc}
 *
 *  {year} | {author} | {email}
 *******************************************************************************/

#if defined( THOR_LLD_TEST_{periph_uc} )
#include "gtest/gtest.h"

int main( int argc, char **argv )
{{
  ::testing::InitGoogleTest( &argc, argv );
  return RUN_ALL_TESTS();
}}

#endif /* THOR_LLD_TEST_{periph_uc} */
"""
class LLDTestDriverTemplate(BaseAttributes):
    """Generates starter source data for an LLD test driver file."""

    @property
    def has_header_file(self):
        # Driver test files are source-only.
        return False

    @property
    def has_source_file(self):
        return True

    @property
    def include_guard(self):
        return None

    def generate_header_filename(self):
        return None

    def generate_header_text(self):
        return None

    def generate_source_filename(self):
        return "test_lld_{}_driver.cpp".format(self.driver.lower())

    def generate_source_text(self):
        """Render the driver test source file from the module template."""
        return self.__module_template().format(
            periph_lc=self.driver.lower(), periph_uc=self.driver.upper(),
            year=self.year, author=self.author, email=self.email,
            include_guard=self.include_guard)

    @staticmethod
    def __module_template():
        return \
"""/********************************************************************************
 *  File Name:
 *    test_lld_{periph_lc}_driver.cpp
 *
 *  Description:
 *    Tests the Thor {periph_uc} low level driver
 *
 *  {year} | {author} | {email}
 *******************************************************************************/

#if defined( THOR_LLD_TEST_{periph_uc} )
/* GTest Includes */
#include "gtest/gtest.h"

/* Chimera Includes */
#include <Chimera/common>
#include <Chimera/{periph_lc}>

/* Thor Includes */
#include <Thor/cfg>
#include <Thor/lld/interface/{periph_lc}/{periph_lc}.hpp>

TEST(Compiler, CanCompile)
{{
  EXPECT_EQ(0, 0);
  Thor::LLD::{periph_uc}::initialize();
}}

#endif /* THOR_LLD_TEST_{periph_uc} */
"""
from google.appengine.ext import ndb
from . import models
class Site(models.Model):
    """Datastore model holding per-site configuration."""

    # Identifier of the site's root folder.
    root_folder_id = ndb.StringProperty()
    # Title displayed for the site.
    site_title = ndb.StringProperty()
    # URL of the site logo image.
    logo_url = ndb.StringProperty()
    # Footer appended to outgoing emails (TextProperty: unindexed text).
    email_footer = ndb.TextProperty()
    # Sidebar content reference.
    sidebar = ndb.StringProperty()
    # URL path associated with the site.
    path = ndb.StringProperty()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 11:37:07 2019
@author: deborahkhider
"""
import xarray as xr
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import imageio
import os
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from matplotlib.colors import Normalize
import sys
import ast
class PiecewiseNorm(Normalize):
    """Normalize that maps the given data levels onto evenly spaced
    values in [0, 1] via piecewise-linear interpolation."""

    def __init__(self, levels, clip=False):
        # Sorted input levels and their evenly spaced normalized targets.
        self._levels = np.sort(levels)
        self._normed = np.linspace(0, 1, len(levels))
        Normalize.__init__(self, None, None, clip)

    def __call__(self, value, clip=None):
        # Linearly interpolate each value from data levels to [0, 1].
        interpolated = np.interp(value, self._levels, self._normed)
        return np.ma.masked_array(interpolated)
def histedges_equalN(x, nbin):
    """Return nbin+1 histogram edges splitting x into equal-count bins."""
    count = len(x)
    # Interpolate the sorted data at evenly spaced rank positions.
    targets = np.linspace(0, count, nbin + 1)
    return np.interp(targets, np.arange(count), np.sort(x))
dataset_name = sys.argv[1]
# Figure size is passed as a literal tuple string, e.g. "(10, 8)".
figsize = ast.literal_eval(sys.argv[2])
#open the file
dataset = xr.open_dataset(dataset_name)
#Get the only variable. According to Scott, one file/variable
varname = list(dataset.data_vars.keys())[0]
## Get the flow values
val = dataset[varname].values
#val2 = exposure.equalize_hist(val)
nx = dataset.nx.values
ny = dataset.ny.values
## get the edges
val_attrs = dataset[varname].attrs
ymin = val_attrs['y_south_edge']
ymax= val_attrs['y_north_edge']
xmin = val_attrs['x_west_edge']
xmax = val_attrs['x_east_edge']
## get the steps
# dx/dy are divided by 3600 -- presumably arcseconds to degrees; confirm units.
dx = val_attrs['dx']/3600
dy = val_attrs['dy']/3600
## easting/northing vectors (cell centers: edge + half step + index*step)
lon = xmin+dx/2+nx*dx
lat = ymin+dy/2+ny*dy
## convert to lat/lon for sanity
xx,yy=np.meshgrid(lon,lat)
xx2 = np.reshape(xx,xx.size)
yy2 = np.reshape(yy,yy.size)
dv= pd.DataFrame({'lon':xx2,'lat':yy2})
#make the map in cartopy
proj = ccrs.PlateCarree(central_longitude = np.mean(dataset['nx']))
idx = dataset['time'].values.size
count = list(np.arange(0,idx,1))
# get the levels to plot
# Equal-count bin edges give each contour band roughly the same pixel count.
levels = np.sort(np.unique(histedges_equalN(np.reshape(val,np.size(val)),60)))
# get the box
X_min= np.round(np.min(lon),2)
X_max= np.round(np.max(lon),2)
Y_min= np.round(np.min(lat),2)
Y_max= np.round(np.max(lat),2)
step_lon = np.round(np.ptp(lon)/5,2)
step_lat = np.round(np.ptp(lat)/5,2)
# Names
# Turn a snake_case long_name into a capitalized title for the colorbar.
long_name = dataset[varname].attrs['long_name']
if '_' in long_name:
    t = long_name.split('_')
    strT = t[0].capitalize()
    for item in t[1:]:
        strT = strT + ' ' + item.capitalize()
else:
    strT = long_name.capitalize()
filenames =[]
#Make a directory if it doesn't exist
if os.path.isdir('./figures') is False:
    os.makedirs('./figures')
# loop to create all figures for each time slice
for i in count:
    v = val[i,:,:]
    fig,ax = plt.subplots(figsize=figsize)
    ax = plt.axes(projection=proj)
    ax.add_feature(cfeature.BORDERS)
    ax.add_feature(cfeature.COASTLINE)
    img = plt.contourf(lon, lat, v, levels,
                       transform=proj, cmap=cm.gist_gray,norm=PiecewiseNorm(levels))
    # Show every 15th level plus the maximum on the colorbar.
    ticks = levels[0::15]
    ticks = np.sort(np.insert(ticks,-1,levels[-1]))
    cbar = plt.colorbar(img, orientation = 'horizontal',
                        format = '%.2e',ticks=ticks)
    cbar.ax.set_xlabel((strT+'('+dataset[varname].attrs['units']+')'))
    ax.set_extent([X_min,X_max,Y_min,Y_max])
    gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
                      linewidth=2, color='gray', alpha=0.5, linestyle='--')
    gl.xlabels_top = False
    gl.ylabels_right = False
    gl.xlines = False
    gl.ylines = False
    gl.xlocator = mticker.FixedLocator(np.arange(X_min,X_max,step_lon))
    gl.ylocator = mticker.FixedLocator(np.arange(Y_min,Y_max,step_lat))
    gl.xformatter = LONGITUDE_FORMATTER
    gl.yformatter = LATITUDE_FORMATTER
    gl.xlabel_style = {'size': 12, 'color': 'gray'}
    gl.ylabel_style = {'size': 12, 'color': 'gray'}
    ax.add_feature(cfeature.RIVERS)
    #save as jpeg
    filename = './figures/'+varname+'_t'+str(i)+'.jpeg'
    filenames.append(filename)
    plt.savefig(filename)
    plt.close(fig)
#Make the GIF from the per-timestep frames
images = []
for filename in filenames:
    images.append(imageio.imread(filename))
imageio.mimsave(dataset_name+'_'+long_name+'_movie.gif', images)
|
from redis import Redis
from rq import Queue, Worker

# Connect to the redis server at host 'redis' -- presumably a
# docker-compose/k8s service name; confirm for other deployments.
redis = Redis(host='redis', port=6379)
queue = Queue('model_prediction', connection=redis)

if __name__ == '__main__':
    print('Starting Worker')
    # work() blocks, processing jobs from the queue until terminated.
    worker = Worker([queue], connection=redis, name='model_prediction')
    worker.work()
    print('Ending Worker')
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
setup(
name='negative-cycles',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.4',
description='For Negative Cycle Detection',
long_description='',
# The project's main homepage.
url='https://github.com/mnpatil17/negative_cycles/',
# Author details
author='Mihir Patil',
author_email='mnpatil17@gmail.com',
# Choose your license
license='BSD 3-clause "New" or "Revised License"',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
keywords='negative-cycle bellman-ford',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=['numpy', 'nose'],
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
)
|
#!/usr/bin/python
import sys
result= {}
for line in sys.stdin:
line = line.strip()
trimmedNucleotide,seqIds = line.split('#', 1)
try:
result[trimmedNucleotide] = 0
except ValueError:
pass
for word in result:
print '%s' % (word)
|
import asyncio
import unittest
from slixmpp import JID
from slixmpp.test.integration import SlixIntegration
class TestUserAvatar(SlixIntegration):
    """Integration test for XEP-0084 (User Avatar) publish and retrieve."""

    async def asyncSetUp(self):
        await super().asyncSetUp()
        # Single client; credentials come from CI environment variables.
        self.add_client(
            self.envjid('CI_ACCOUNT1'),
            self.envstr('CI_ACCOUNT1_PASSWORD'),
        )
        self.register_plugins(['xep_0084', 'xep_0115'])
        # Raw avatar payload used throughout the test (13 bytes long).
        self.data = b'coucou coucou'
        await self.connect_clients()
        await asyncio.gather(
            self.clients[0]['xep_0115'].update_caps(),
        )

    async def _clear_avatar(self):
        """Utility for purging remote state"""
        await self.clients[0]['xep_0084'].stop()
        await self.clients[0]['xep_0084'].publish_avatar(b'')

    async def test_set_avatar(self):
        """Check we can set and get a PEP avatar and metadata"""
        await self._clear_avatar()
        await self.clients[0]['xep_0084'].publish_avatar(
            self.data
        )
        # Metadata advertised for the avatar we just published; 'bytes'
        # matches len(self.data).
        metadata = {
            'id': self.clients[0]['xep_0084'].generate_id(self.data),
            'bytes': 13,
            'type': 'image/jpeg',
        }
        # Wait for metadata publish event
        event = self.clients[0].wait_until('avatar_metadata_publish')
        publish = self.clients[0]['xep_0084'].publish_avatar_metadata(
            metadata,
        )
        # Run the publish and the event wait concurrently.
        res = await asyncio.gather(
            event,
            publish,
        )
        message = res[0]
        recv_meta = message['pubsub_event']['items']['item']['avatar_metadata']
        info = recv_meta['info']
        # The server must echo back exactly what we advertised.
        self.assertEqual(info['bytes'], metadata['bytes'])
        self.assertEqual(info['type'], metadata['type'])
        self.assertEqual(info['id'], metadata['id'])
        recv = await self.clients[0]['xep_0084'].retrieve_avatar(
            JID(self.clients[0].boundjid.bare),
            info['id']
        )
        avatar = recv['pubsub']['items']['item']['avatar_data']['value']
        self.assertEqual(avatar, self.data)
        await self._clear_avatar()

# Allow running this module under a plain unittest runner.
suite = unittest.TestLoader().loadTestsFromTestCase(TestUserAvatar)
|
import random
import pyfiglet
import MyColors
import os
lbiu = MyColors.lightColorsBIU()
nbiu = MyColors.NormalBIUColors()
lbold = MyColors.lightColorsBold()
def game():
    """Run one round of the 'think of a number' parlour trick.

    The player thinks of x, doubles it, adds c, halves the total and
    subtracts x -- leaving c/2 regardless of x.  The script mirrors the
    same arithmetic with its own random l, so s = (2*l + c)/2 - l = c/2
    always matches the player's mental result.
    """
    os.system("clear")
    name = input(lbold.LIGHT_greenBold("\n\n\nYour name: "))
    print("\nWelcome")
    print(pyfiglet.figlet_format(name))
    l = random.randint(1,100)  # stand-in for the player's secret number
    c = random.randrange(2,100,4)  # even offset, announced to the player
    m = l +l  # "double the number you thought of"
    print("\nThink a number")
    input(lbold.LIGHT_cyanBold("\nHit enter to continue! :"))
    k = m+c
    print(f"Add the number you've thought! ")
    input(lbold.LIGHT_megantaBold("Hit enter to continue! :"))
    print(f"Add {c} to the total! ")
    input(lbold.LIGHT_megantaBold("Hit enter to continue! :"))
    j = k/2  # true division: j is a float
    print(f"Divide the total by 2! :")
    input(lbold.LIGHT_cyanBold("\nHit enter to continue! :"))
    s = j-l  # always equals c/2, independent of l
    print("Sub the number you've thought from the remaining amount! ")
    input(lbold.LIGHT_cyanBold("\nHit enter to continue! :"))
    print(f"You are left with {s}")  # displayed as a float, e.g. 3.0
no = ["n", "no"]
cont = ""
while cont not in no:
game()
cont = input(lbold.LIGHT_cyanBold("Do you want to play again!"))
|
# Create your awesome net!!
# Load and dump the pickled network parameters for inspection.
import pickle

# Bug fix: use a context manager so the file handle is always closed
# (the original opened the file and never closed it).
with open('deep_convnet_params.pkl', 'rb') as f:
    data = pickle.load(f)
print(data)
# Ordered register of parameter names; each name below becomes a module-level
# integer constant equal to its position in this list.
NAMES = [
    'k5z',
    'k5ta',
    'k5ma',
    'k5tb2',
    'k5mb2',
    'k5tb3',
    'k5mb3',
    'k6t',
    'k6m',
    'k1ta',
    'k1ma',
    'k1tb',
    'k1mb',
    'k2ta',
    'k2ma',
    'k2tb',
    'k2mb',
    'k2tc',
    'k2mc',
    'k3t',
    'k3m',
    'k4t',
    'k4m',
    #signal
    'sinput',
    'sbase',
    'tpulse',
    'traise',
    'tdecay',
    'tdelay',
    'slate',
]
# Bind each name to its index.  (Improvement: direct globals() assignment
# replaces the original exec() of a formatted string -- same effect, no
# string evaluation.)
for idx, name in enumerate(NAMES):
    globals()[name] = idx
# Total number of registered parameters.
NUM = len(NAMES)
|
import argparse
import subprocess
from pathlib import Path
import pickle
import numpy as np
from ...Data import pipeline
from alphafold.Model import AlphaFold, AlphaFoldFeatures, model_config
import torch
import numpy as np
import matplotlib.pylab as plt
def string_plot(af2, thist, field):
    """Show a 1-D per-residue feature from both pipelines side by side.

    The length-N vector is truncated to M*M entries (M = floor(sqrt(N)))
    and displayed as an M x M image for easier visual comparison.

    Args:
        af2: dict of numpy arrays (reference AlphaFold2 features).
        thist: dict of torch tensors (this implementation's features).
        field: feature key present in both dicts.
    """
    # Bug fix: the original ignored its parameters and read the
    # module-level af2_proc_features/this_proc_features globals instead.
    af2_vec = torch.from_numpy(af2[field][0, :])
    this_vec = thist[field][0, :]
    N = af2_vec.shape[0]
    M = int(np.sqrt(N))
    af2_img = af2_vec[:M*M].view(M, M)
    this_img = this_vec[:M*M].view(M, M)
    plt.subplot(1, 2, 1)
    plt.title(f'AF2: {field}')
    plt.imshow(af2_img)
    plt.subplot(1, 2, 2)
    plt.title(f'This: {field}')
    plt.imshow(this_img)
    plt.show()
def image_plot(af2, thist, field):
    """Show a batched 2-D feature from both pipelines side by side.

    One row of subplots per batch element: AF2 on the left, this
    implementation on the right.

    Args:
        af2: dict of numpy arrays (reference AlphaFold2 features).
        thist: dict of torch tensors (this implementation's features).
        field: feature key present in both dicts (not 'msa_feat').

    Raises:
        NotImplementedError: for the 4-D 'msa_feat' field (see msa_feat_plot).
    """
    if field == 'msa_feat':
        raise NotImplementedError()
    # Bug fix: use the af2/thist arguments instead of the module-level
    # af2_proc_features/this_proc_features globals the original read.
    this_t = thist[field]
    batch_size = this_t.size(0)
    size_x = this_t.size(1)
    size_y = this_t.size(2)
    af2_t = torch.from_numpy(af2[field])
    # Figure size proportioned to the image aspect ratio.
    fig = plt.figure(figsize=(2*6*size_x/float(size_x+size_y), batch_size*6*size_y/float(size_x+size_y)))
    for i in range(batch_size):
        plt.subplot(batch_size, 2, 2*i+1)
        if i == 0:
            plt.title(f'AF2: {field}')
        # Transpose tall images so the long axis is horizontal.
        if size_x < size_y:
            plt.imshow(af2_t[i, :, :])
        else:
            plt.imshow(af2_t[i, :, :].transpose(0, 1))
    for i in range(batch_size):
        plt.subplot(batch_size, 2, 2*i+2)
        if i == 0:
            plt.title(f'This: {field}')
        if size_x < size_y:
            plt.imshow(this_t[i, :, :])
        else:
            plt.imshow(this_t[i, :, :].transpose(0, 1))
    plt.tight_layout()
    plt.show()
def msa_feat_plot(af2, thist):
    """Tile the per-channel msa_feat slices of both pipelines into mosaics.

    Each channel of msa_feat[0] becomes one cell in an N x N grid
    (N = floor(sqrt(channels)) + 1), shown side by side for AF2 and this
    implementation.

    Args:
        af2: dict of numpy arrays (reference AlphaFold2 features).
        thist: dict of torch tensors (this implementation's features).
    """
    field = 'msa_feat'
    # Bug fix: the original ignored its parameters and read the
    # module-level af2_proc_features/this_proc_features globals instead.
    batch_size = thist[field].size(3)
    size_x = thist[field].size(1)
    size_y = thist[field].size(2)
    # Reorder to (channel, x, y) so each channel tiles as one mosaic cell.
    af2_t = torch.from_numpy(af2[field][0, :, :, :]).transpose(1, 2).transpose(0, 1)
    this_t = thist[field][0, :, :, :].transpose(1, 2).transpose(0, 1)
    N = int(np.sqrt(batch_size)) + 1
    image_this = torch.zeros(size_x*N, size_y*N)
    image_af2 = torch.zeros(size_x*N, size_y*N)
    for i in range(N):
        for j in range(N):
            idx = i*N + j
            if idx >= batch_size:
                # NOTE(review): this break only exits the inner loop, so
                # max_i keeps being overwritten on later rows and ends as
                # N-1 -- preserved from the original; confirm the crop
                # below is the intended one.
                max_i = i
                max_j = j
                break
            image_this[size_x*i:size_x*(i+1), size_y*j:size_y*(j+1)] = this_t[idx, :, :]
            image_af2[size_x*i:size_x*(i+1), size_y*j:size_y*(j+1)] = af2_t[idx, :, :]
    fig = plt.figure(figsize=(12, 6))
    plt.subplot(1, 2, 1)
    plt.title(f'AF2: {field}')
    plt.imshow(image_af2[:size_x*(max_i-1), :])
    plt.subplot(1, 2, 2)
    plt.title(f'This: {field}')
    plt.imshow(image_this[:size_x*(max_i-1), :])
    plt.tight_layout()
    plt.show()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train deep protein docking')
    parser.add_argument('-fasta_path', default='T1024.fas', type=str)
    parser.add_argument('-output_dir', default='/media/lupoglaz/AlphaFold2Output', type=str)
    parser.add_argument('-model_name', default='model_1', type=str)
    parser.add_argument('-data_dir', default='/media/lupoglaz/AlphaFold2Data', type=str)
    args = parser.parse_args()
    # Single-ensemble eval config without templates.
    # NOTE(review): this rebinds the imported `model_config` function name
    # to the returned config object -- confirm intentional shadowing.
    model_config = model_config(args.model_name)
    model_config.data.eval.num_ensemble = 1
    model_config.data.common.use_templates = False
    af2features = AlphaFoldFeatures(config=model_config)
    # Raw features and the reference (AF2-processed) features for T1024,
    # produced by an earlier pipeline run.
    features_path = Path(args.output_dir)/Path('T1024')/Path('features.pkl')
    proc_features_path = Path(args.output_dir)/Path('T1024')/Path('proc_features.pkl')
    with open(features_path, 'rb') as f:
        raw_feature_dict = pickle.load(f)
    with open(proc_features_path, 'rb') as f:
        af2_proc_features = pickle.load(f)
    # Process the same raw features with this implementation (fixed seed).
    this_proc_features = af2features(raw_feature_dict, random_seed=42)
    # Report keys present in AF2's output but missing from ours, then
    # compare shapes field by field (template features skipped).
    common_keys = set(af2_proc_features.keys()) & set(this_proc_features.keys())
    missing_keys = set(af2_proc_features.keys()) - common_keys
    print(missing_keys)
    for k in common_keys:
        if k.startswith('template_'):
            continue
        print(k, af2_proc_features[k].shape, this_proc_features[k].shape)
    # Fields below were verified visually and left disabled.
    #Correct:
    # print(f'AF2: {af2_proc_features["seq_length"]}\nThis: {this_proc_features["seq_length"]}')
    # string_plot(af2_proc_features, this_proc_features, 'residue_index')
    # string_plot(af2_proc_features, this_proc_features, 'aatype')
    # image_plot(af2_proc_features, this_proc_features, 'bert_mask')
    # image_plot(af2_proc_features, this_proc_features, 'extra_msa_mask')
    # image_plot(af2_proc_features, this_proc_features, 'true_msa')
    # image_plot(af2_proc_features, this_proc_features, 'extra_msa')
    # msa_feat_plot(af2_proc_features, this_proc_features)
    # image_plot(af2_proc_features, this_proc_features, 'target_feat')
    # image_plot(af2_proc_features, this_proc_features, 'msa_mask')
    # string_plot(af2_proc_features, this_proc_features, 'extra_msa_row_mask')
    # string_plot(af2_proc_features, this_proc_features, 'msa_row_mask')
    # string_plot(af2_proc_features, this_proc_features, 'seq_mask')
    # image_plot(af2_proc_features, this_proc_features, 'extra_has_deletion')
    # image_plot(af2_proc_features, this_proc_features, 'extra_deletion_value')
# For each input line "unit_usable unit_period vacation" compute how many
# vacation days can actually be taken (unit_usable days per unit_period),
# until a line containing a zero terminates the input.
import sys

input = sys.stdin.readline  # fast line reader (shadows the builtin on purpose)

index = 0
while True:
    line = input()
    # Robustness fix: stop cleanly at EOF instead of raising ValueError
    # when the terminating "0 0 0" line is missing.
    if not line:
        break
    unit_usable, unit_period, vacation = map(int, line.split())
    if unit_usable == 0 or unit_period == 0 or vacation == 0:
        break
    index += 1
    can_use = 0
    # Consume whole periods while more than one period of vacation remains.
    while unit_period < vacation:
        can_use += unit_usable
        vacation -= unit_period
    # The final (possibly partial) period contributes at most unit_usable days.
    can_use += min(vacation, unit_usable)
    print(f"Case {index}: {can_use}")
|
from .utils import list_categories, create_simple_message
|
"""
Overview
========
Create an anonymous gist on github.
"""
from websnake import Post, ResponseHandle, core, die, JSon, TokenAuth
def handle_done(con, response):
    """Print the full HTTP response, then stop the websnake event loop."""
    fields = (
        ('Headers:', response.headers.headers),
        ('Code:', response.code),
        ('Version:', response.version),
        ('Reason:', response.reason),
        ('Data:', response.content()),
    )
    for label, value in fields:
        print(label, value)
    die()
if __name__ == '__main__':
    # Payload for a single-file public gist.
    data = {
        "description": "the description for this gist1",
        "public": True, "files": {
            "file1.txt": {"content": "String file contents"}}}
    # POST to the GitHub gists endpoint.  'API_TOKEN' is a placeholder --
    # replace it with a personal access token that has the gist scope.
    request = Post('https://api.github.com/gists', args = {'scope': 'gist'},
    payload=JSon(data), auth = TokenAuth('API_TOKEN'))
    # Print the response once the request completes (handler calls die()).
    request.add_map(ResponseHandle.DONE, handle_done)
    core.gear.mainloop()
|
# Generated by Django 3.1.2 on 2021-08-03 00:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop Character.number and add Character.image (an image URL string)."""

    dependencies = [
        ('characters', '0007_auto_20210802_2046'),
    ]
    operations = [
        # The numeric identifier is removed from the model.
        migrations.RemoveField(
            model_name='character',
            name='number',
        ),
        # Images are referenced by URL, not uploaded files.
        migrations.AddField(
            model_name='character',
            name='image',
            field=models.CharField(default='', help_text='URL', max_length=255),
        ),
    ]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.transforms import SIGN
from torch.utils.data import DataLoader
import math
from typing import Optional
from torch import Tensor
eps=1e-5  # numerical-stability constant used by the batch-norm code below
from Precomputing.base import PrecomputingBase
class SAGN(PrecomputingBase):
    """Scale-Adaptive Graph Network head over precomputed hop features.

    Each of the num_layers+1 hop feature matrices is encoded by its own
    multi-head GroupMLP; the per-hop representations are combined by
    attention (default), uniform averaging, or exponentially decaying
    weights, mixed with a linear residual of the hop-0 features,
    batch-normalized, and fed to a final GroupMLP whose head outputs are
    averaged for the prediction.
    """
    def __init__(self, args, data, train_idx, n_layers=2, num_heads=1, weight_style="attention", alpha=0.5, focal="first",
        hop_norm="softmax", input_drop=0.0, attn_drop=0.0, negative_slope=0.2, zero_inits=False, position_emb=False):
        super(SAGN, self).__init__(args, data, train_idx)
        # One encoder per hop: hop 0 .. self.num_layers.
        num_hops = self.num_layers + 1
        in_feats = args.num_feats
        hidden = self.dim_hidden
        out_feats = args.num_classes
        dropout = args.dropout
        self._num_heads = num_heads
        self._hidden = hidden
        self._out_feats = out_feats
        self._weight_style = weight_style
        self._alpha = alpha  # decay base for "exponent" weighting
        self._hop_norm = hop_norm  # how attention scores are normalized
        self._zero_inits = zero_inits  # zero-init attention vectors if True
        self._focal = focal  # which hop anchors the attention query
        self.dropout = nn.Dropout(dropout)
        self.attn_dropout = nn.Dropout(attn_drop)
        # self.bn = nn.BatchNorm1d(hidden * num_heads)
        self.bn = MultiHeadBatchNorm(num_heads, hidden * num_heads)
        self.relu = nn.ReLU()
        self.input_drop = nn.Dropout(input_drop)
        self.multihop_encoders = nn.ModuleList([GroupMLP(in_feats, hidden, hidden, num_heads, n_layers, dropout) for i in range(num_hops)])
        self.res_fc = nn.Linear(in_feats, hidden * num_heads, bias=False)
        if weight_style == "attention":
            # GAT-style decomposed attention: a left term per hop plus a
            # right term computed from the focal hop.
            self.hop_attn_l = nn.Parameter(torch.FloatTensor(size=(1, num_heads, hidden)))
            self.hop_attn_r = nn.Parameter(torch.FloatTensor(size=(1, num_heads, hidden)))
            self.leaky_relu = nn.LeakyReLU(negative_slope)
        if position_emb:
            # Learned per-hop embedding added to the raw hop inputs.
            self.pos_emb = nn.Parameter(torch.FloatTensor(size=(num_hops, in_feats)))
        else:
            self.pos_emb = None
        self.post_encoder = GroupMLP(hidden, hidden, out_feats, num_heads, n_layers, dropout)
        # self.reset_parameters()
    def reset_parameters(self):
        # NOTE(review): not invoked from __init__ (call commented out
        # above); sub-modules initialise themselves on construction.
        gain = nn.init.calculate_gain("relu")
        for encoder in self.multihop_encoders:
            encoder.reset_parameters()
        nn.init.xavier_uniform_(self.res_fc.weight, gain=gain)
        if self._weight_style == "attention":
            if self._zero_inits:
                nn.init.zeros_(self.hop_attn_l)
                nn.init.zeros_(self.hop_attn_r)
            else:
                nn.init.xavier_normal_(self.hop_attn_l, gain=gain)
                nn.init.xavier_normal_(self.hop_attn_r, gain=gain)
        if self.pos_emb is not None:
            nn.init.xavier_normal_(self.pos_emb, gain=gain)
        self.post_encoder.reset_parameters()
        self.bn.reset_parameters()
    def forward(self, feats, return_attn=False):
        # feats: list of per-hop feature tensors (one entry per hop).
        out = 0
        feats = [self.input_drop(feat) for feat in feats]
        if self.pos_emb is not None:
            feats = [f +self.pos_emb[[i]] for i, f in enumerate(feats)]
        # Encode every hop into [N, heads, hidden].
        hidden = []
        for i in range(len(feats)):
            hidden.append(self.multihop_encoders[i](feats[i]).view(-1, self._num_heads, self._hidden))
        a = None
        if self._weight_style == "attention":
            # The representation that the hops attend against.
            if self._focal == "first":
                focal_feat = hidden[0]
            if self._focal == "last":
                focal_feat = hidden[-1]
            if self._focal == "average":
                focal_feat = 0
                for h in hidden:
                    focal_feat += h
                focal_feat /= len(hidden)
            # Decomposed logits: score(hop) = a_l(hop) + a_r(focal).
            astack_l = [(h * self.hop_attn_l).sum(dim=-1).unsqueeze(-1) for h in hidden]
            a_r = (focal_feat * self.hop_attn_r).sum(dim=-1).unsqueeze(-1)
            astack = torch.stack([(a_l + a_r) for a_l in astack_l], dim=-1)
            if self._hop_norm == "softmax":
                a = self.leaky_relu(astack)
                a = F.softmax(a, dim=-1)
            if self._hop_norm == "sigmoid":
                a = torch.sigmoid(astack)
            if self._hop_norm == "tanh":
                a = torch.tanh(astack)
            a = self.attn_dropout(a)
            # Attention-weighted sum of hop representations.
            for i in range(a.shape[-1]):
                out += hidden[i] * a[:, :, :, i]
        if self._weight_style == "uniform":
            for h in hidden:
                out += h / len(hidden)
        if self._weight_style == "exponent":
            for k, h in enumerate(hidden):
                out += self._alpha ** k * h
        # Linear residual from the hop-0 input features.
        out += self.res_fc(feats[0]).view(-1, self._num_heads, self._hidden)
        out = out.flatten(1, -1)
        out = self.dropout(self.relu(self.bn(out)))
        out = out.view(-1, self._num_heads, self._hidden)
        out = self.post_encoder(out)
        # Average over heads for the final logits.
        out = out.mean(1)
        if return_attn:
            return out, a.mean(1) if a is not None else None
        else:
            return out
################################################################
# DGL's implementation of FeedForwardNet (MLP) for SIGN
class FeedForwardNet(nn.Module):
    """Plain MLP used by SIGN: Linear layers with PReLU + dropout between."""

    def __init__(self, in_feats, hidden, out_feats, n_layers, dropout):
        super(FeedForwardNet, self).__init__()
        self.layers = nn.ModuleList()
        self.n_layers = n_layers
        if n_layers == 1:
            # Single projection straight from input to output size.
            self.layers.append(nn.Linear(in_feats, out_feats))
        else:
            # in -> hidden -> ... -> hidden -> out
            dims = [in_feats] + [hidden] * (n_layers - 1) + [out_feats]
            for d_in, d_out in zip(dims[:-1], dims[1:]):
                self.layers.append(nn.Linear(d_in, d_out))
        if self.n_layers > 1:
            self.prelu = nn.PReLU()
            self.dropout = nn.Dropout(dropout)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialise every linear layer: Xavier weights, zero bias."""
        gain = nn.init.calculate_gain("relu")
        for lin in self.layers:
            nn.init.xavier_uniform_(lin.weight, gain=gain)
            nn.init.zeros_(lin.bias)

    def forward(self, x):
        last = self.n_layers - 1
        for idx, lin in enumerate(self.layers):
            x = lin(x)
            if idx < last:
                # Non-linearity + dropout between all but the final layer.
                x = self.dropout(self.prelu(x))
        return x
################################################################
# More general MLP layer
class MLP(nn.Module):
    """General multi-layer perceptron.

    Structure: Linear -> [norm -> ReLU -> dropout] repeated, ending in a
    plain Linear.  Supports input dropout, optional residual connections
    between equal-width layers, and a choice of inter-layer normalization.

    Args:
        in_feats/hidden/out_feats: layer widths.
        n_layers: total number of Linear layers.
        dropout: dropout probability between layers.
        input_drop: dropout probability applied to the input.
        residual: add skip connections when consecutive widths match.
        normalization: "batch", "layer" or "none".
    """

    def __init__(self, in_feats, hidden, out_feats, n_layers, dropout, input_drop=0., residual=False, normalization="batch"):
        super(MLP, self).__init__()
        self._residual = residual
        self.layers = nn.ModuleList()
        self.norms = nn.ModuleList()
        self.n_layers = n_layers
        self.input_drop = nn.Dropout(input_drop)
        if n_layers == 1:
            self.layers.append(nn.Linear(in_feats, out_feats))
        else:
            self.layers.append(nn.Linear(in_feats, hidden))
            self.norms.append(self._make_norm(normalization, hidden))
            for _ in range(n_layers - 2):
                self.layers.append(nn.Linear(hidden, hidden))
                self.norms.append(self._make_norm(normalization, hidden))
            self.layers.append(nn.Linear(hidden, out_feats))
        if self.n_layers > 1:
            self.relu = nn.ReLU()
            self.dropout = nn.Dropout(dropout)
        self.reset_parameters()

    @staticmethod
    def _make_norm(normalization, hidden):
        """Build one inter-layer normalization module."""
        if normalization == "batch":
            return nn.BatchNorm1d(hidden)
        if normalization == "layer":
            return nn.LayerNorm(hidden)
        if normalization == "none":
            return nn.Identity()
        # Fail fast: the original silently skipped unknown names, which
        # later surfaced as an obscure IndexError in forward().
        raise ValueError("unknown normalization: {}".format(normalization))

    def reset_parameters(self):
        """Xavier-init every linear (zero bias); reset stateful norms."""
        gain = nn.init.calculate_gain("relu")
        for layer in self.layers:
            nn.init.xavier_uniform_(layer.weight, gain=gain)
            nn.init.zeros_(layer.bias)
        for norm in self.norms:
            # Bug fix: nn.Identity (normalization="none") has no
            # reset_parameters, so guard instead of crashing.
            if hasattr(norm, "reset_parameters"):
                norm.reset_parameters()

    def forward(self, x):
        x = self.input_drop(x)
        if self._residual:
            prev_x = x
        for layer_id, layer in enumerate(self.layers):
            x = layer(x)
            if layer_id < self.n_layers - 1:
                x = self.dropout(self.relu(self.norms[layer_id](x)))
            if self._residual:
                # Skip connection only when the widths line up.
                if x.shape[1] == prev_x.shape[1]:
                    x += prev_x
                prev_x = x
        return x
# Multi-head (ensemble) MLP, note that different heads are processed
# sequentially
class MultiHeadMLP(nn.Module):
    """Ensemble of independent MLP heads, evaluated one head at a time.

    Accepts [N, d_in] (the same input fed to every head) or [N, H, d_in]
    (one input slice per head).  Head outputs are stacked on dim 1 and
    optionally flattened when concat=True.
    """

    def __init__(self, in_feats, hidden, out_feats, n_heads, n_layers, dropout, input_drop=0., concat=False, residual=False, normalization="batch"):
        super().__init__()
        self._concat = concat
        heads = [
            MLP(in_feats, hidden, out_feats, n_layers, dropout,
                input_drop=input_drop, residual=residual,
                normalization=normalization)
            for _ in range(n_heads)
        ]
        self.mlp_list = nn.ModuleList(heads)
        # self.reset_parameters()

    def reset_parameters(self):
        """Re-initialise every head."""
        for head in self.mlp_list:
            head.reset_parameters()

    def forward(self, x):
        # x: [N, d_in] or [N, H, d_in]
        if len(x.shape) == 3:
            out = [head(x[:, i, :]) for i, head in enumerate(self.mlp_list)]
        if len(x.shape) == 2:
            out = [head(x) for head in self.mlp_list]
        stacked = torch.stack(out, dim=1)
        return stacked.flatten(1, -1) if self._concat else stacked
class ParallelMLP(nn.Module):
    """Multi-head MLP where all heads run in parallel via grouped 1x1 convs.

    Each head owns one channel group of every nn.Conv1d(groups=n_heads),
    so a single conv call evaluates all heads at once.

    Input: [N, d_in] (broadcast to every head) or [N, H, d_in].
    Output: [N, H, d_out].
    """

    def __init__(self, in_feats, hidden, out_feats, n_heads, n_layers, dropout, input_drop=0., residual=False, normalization="batch"):
        super(ParallelMLP, self).__init__()
        self._residual = residual
        self.layers = nn.ModuleList()
        self.norms = nn.ModuleList()
        self._n_heads = n_heads
        self._n_layers = n_layers
        self.input_drop = nn.Dropout(input_drop)
        if self._n_layers == 1:
            self.layers.append(nn.Conv1d(in_feats * n_heads, out_feats * n_heads, kernel_size=1, groups=n_heads))
        else:
            self.layers.append(nn.Conv1d(in_feats * n_heads, hidden * n_heads, kernel_size=1, groups=n_heads))
            self.norms.append(self._make_norm(normalization, hidden, n_heads))
            for _ in range(self._n_layers - 2):
                self.layers.append(nn.Conv1d(hidden * n_heads, hidden * n_heads, kernel_size=1, groups=n_heads))
                self.norms.append(self._make_norm(normalization, hidden, n_heads))
            self.layers.append(nn.Conv1d(hidden * n_heads, out_feats * n_heads, kernel_size=1, groups=n_heads))
        if self._n_layers > 1:
            self.relu = nn.ReLU()
            self.dropout = nn.Dropout(dropout)
        self.reset_parameters()

    @staticmethod
    def _make_norm(normalization, hidden, n_heads):
        """Build one inter-layer normalization module."""
        if normalization == "batch":
            return nn.BatchNorm1d(hidden * n_heads)
        if normalization == "layer":
            # GroupNorm normalizes each head's channel group separately.
            return nn.GroupNorm(n_heads, hidden * n_heads)
        if normalization == "none":
            return nn.Identity()
        # Fail fast: the original silently skipped unknown names, which
        # later surfaced as an IndexError in forward().
        raise ValueError("unknown normalization: {}".format(normalization))

    def reset_parameters(self):
        """Xavier-init each head's slice of every conv; reset stateful norms."""
        gain = nn.init.calculate_gain("relu")
        for head in range(self._n_heads):
            for layer in self.layers:
                # NOTE(review): weight[head] indexes a single output
                # channel, not a whole head's group -- preserved from the
                # original; confirm this is the intended init.
                nn.init.xavier_uniform_(layer.weight[head], gain=gain)
                if layer.bias is not None:
                    nn.init.zeros_(layer.bias[head])
        # Norms are head-independent: reset them once, not once per head
        # as the original did.
        for norm in self.norms:
            # Bug fix: nn.Identity (normalization="none") has no
            # reset_parameters, so guard instead of crashing.
            if hasattr(norm, "reset_parameters"):
                norm.reset_parameters()

    def forward(self, x):
        x = self.input_drop(x)
        if len(x.shape) == 2:
            # Broadcast the shared input to every head: [N, d] -> [N, H, d].
            x = x.view(-1, 1, x.shape[1])
            x = x.repeat(1, self._n_heads, 1)
        if len(x.shape) == 3:
            # Fold heads into channels for the grouped convs: [N, H*d, 1].
            x = x.flatten(1, -1).unsqueeze(-1)
        if self._residual:
            prev_x = x
        for layer_id, layer in enumerate(self.layers):
            x = layer(x)
            if layer_id < self._n_layers - 1:
                x = self.dropout(self.relu(self.norms[layer_id](x)))
            if self._residual:
                # NOTE(review): compares the length dim (always 1 here),
                # not the channel dim -- preserved from the original.
                if x.shape[2] == prev_x.shape[2]:
                    x += prev_x
                prev_x = x
        # Unfold channels back into heads: [N, H, d_out].
        x = x.view(-1, self._n_heads, x.shape[1] // self._n_heads)
        return x
################################################################
# Modified multi-head Linear layer
class MultiHeadLinear(nn.Module):
    """Per-head linear transform: one [d_in, d_out] weight matrix per head.

    Input [N, d_in] (broadcast across heads) or [N, H, d_in];
    output [N, H, d_out].
    """

    def __init__(self, in_feats, out_feats, n_heads, bias=True):
        super().__init__()
        self.weight = nn.Parameter(torch.FloatTensor(size=(n_heads, in_feats, out_feats)))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(size=(n_heads, 1, out_feats)))
        else:
            self.bias = None
        # Bug fix: the original never initialised its parameters here,
        # leaving uninitialised memory unless a caller reset them manually.
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Kaiming-init each head's weight; uniform bias like nn.Linear.

        Bug fix: the original zipped self.weight with self.bias, which
        raised TypeError when bias=False (self.bias is None).
        """
        for head in range(self.weight.shape[0]):
            nn.init.kaiming_uniform_(self.weight[head], a=math.sqrt(5))
            if self.bias is not None:
                fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[head])
                bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
                nn.init.uniform_(self.bias[head], -bound, bound)

    def forward(self, x):
        # input size: [N, d_in] or [N, H, d_in]
        # output size: [N, H, d_out] (matmul broadcasts over heads)
        if len(x.shape) == 3:
            x = x.transpose(0, 1)
        x = torch.matmul(x, self.weight)
        if self.bias is not None:
            x += self.bias
        return x.transpose(0, 1)
# Modified multi-head BatchNorm1d layer
# Modified multi-head BatchNorm1d layer
class MultiHeadBatchNorm(nn.Module):
    """Batch norm applied independently to each head's feature slice.

    Expects input [N, in_feats] with in_feats = n_heads * C; statistics are
    computed over the batch per (head, channel).  Returns [N, n_heads, C].
    Uses the module-level `eps` constant for numerical stability.
    """
    def __init__(self, n_heads, in_feats, momentum=0.1, affine=True, device=None,
        dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}  # NOTE(review): collected but never used
        super().__init__()
        assert in_feats % n_heads == 0
        self._in_feats = in_feats
        self._n_heads = n_heads
        self._momentum = momentum
        self._affine = affine
        if affine:
            # Per-head scale and shift, shaped [n_heads, C].
            self.weight = nn.Parameter(torch.empty(size=(n_heads, in_feats // n_heads)))
            self.bias = nn.Parameter(torch.empty(size=(n_heads, in_feats // n_heads)))
        else:
            self.register_parameter("weight", None)
            self.register_parameter("bias", None)
        # Running statistics, one row per head.
        self.register_buffer("running_mean", torch.zeros(size=(n_heads, in_feats // n_heads)))
        self.register_buffer("running_var", torch.ones(size=(n_heads, in_feats // n_heads)))
        self.running_mean: Optional[Tensor]
        self.running_var: Optional[Tensor]
        self.reset_parameters()
    def reset_parameters(self):
        """Zero the running stats; identity affine (weight=1, bias=0)."""
        self.running_mean.zero_()  # type: ignore[union-attr]
        self.running_var.fill_(1)  # type: ignore[union-attr]
        if self._affine:
            nn.init.zeros_(self.bias)
            for weight in self.weight:
                nn.init.ones_(weight)
    def forward(self, x):
        assert x.shape[1] == self._in_feats
        # Split channels into heads: [N, n_heads, C].
        x = x.view(-1, self._n_heads, self._in_feats // self._n_heads)
        self.running_mean = self.running_mean.to(x.device)
        self.running_var = self.running_var.to(x.device)
        # Batch statistics are used while training; running stats in eval.
        # (running_mean/var are never None here, so eval always takes the
        # running-stats branch.)
        if self.training:
            bn_training = True
        else:
            bn_training = (self.running_mean is None) and (self.running_var is None)
        if bn_training:
            mean = x.mean(dim=0, keepdim=True)
            var = x.var(dim=0, unbiased=False, keepdim=True)
            out = (x-mean) * torch.rsqrt(var + eps)
            # NOTE(review): mean/var keep a leading batch dim of 1, so this
            # broadcast turns the [H, C] buffers into [1, H, C] after the
            # first training step -- numerically harmless but worth
            # confirming against checkpoint loading.
            self.running_mean = (1 - self._momentum) * self.running_mean + self._momentum * mean.detach()
            self.running_var = (1 - self._momentum) * self.running_var + self._momentum * var.detach()
        else:
            out = (x - self.running_mean) * torch.rsqrt(self.running_var + eps)
        if self._affine:
            out = out * self.weight + self.bias
        return out
# Another multi-head MLP defined from scratch
class GroupMLP(nn.Module):
    """Multi-head MLP built from MultiHeadLinear layers.

    All heads are evaluated in a single batched matmul per layer.
    Input [N, d_in] (shared across heads) or [N, H, d_in];
    output [N, H, d_out].  normalization: "batch" (MultiHeadBatchNorm),
    "layer" (GroupNorm, one group per head) or "none".
    """

    def __init__(self, in_feats, hidden, out_feats, n_heads, n_layers, dropout, input_drop=0., residual=False, normalization="batch"):
        super(GroupMLP, self).__init__()
        self._residual = residual
        self.layers = nn.ModuleList()
        self.norms = nn.ModuleList()
        self._n_heads = n_heads
        self._n_layers = n_layers
        self.input_drop = nn.Dropout(input_drop)
        if self._n_layers == 1:
            self.layers.append(MultiHeadLinear(in_feats, out_feats, n_heads))
        else:
            self.layers.append(MultiHeadLinear(in_feats, hidden, n_heads))
            if normalization == "batch":
                self.norms.append(MultiHeadBatchNorm(n_heads, hidden * n_heads))
            if normalization == "layer":
                self.norms.append(nn.GroupNorm(n_heads, hidden * n_heads))
            if normalization == "none":
                self.norms.append(nn.Identity())
            for i in range(self._n_layers - 2):
                self.layers.append(MultiHeadLinear(hidden, hidden, n_heads))
                if normalization == "batch":
                    self.norms.append(MultiHeadBatchNorm(n_heads, hidden * n_heads))
                if normalization == "layer":
                    self.norms.append(nn.GroupNorm(n_heads, hidden * n_heads))
                if normalization == "none":
                    self.norms.append(nn.Identity())
            self.layers.append(MultiHeadLinear(hidden, out_feats, n_heads))
        if self._n_layers > 1:
            self.relu = nn.ReLU()
            self.dropout = nn.Dropout(dropout)
        # Kaiming init (as nn.Linear would do), immediately overridden by
        # the Xavier init in reset_parameters() below; kept to preserve the
        # original's RNG-consumption order.
        for head in range(self._n_heads):
            for layer in self.layers:
                nn.init.kaiming_uniform_(layer.weight[head], a=math.sqrt(5))
                if layer.bias is not None:
                    fan_in, _ = nn.init._calculate_fan_in_and_fan_out(layer.weight[head])
                    bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
                    nn.init.uniform_(layer.bias[head], -bound, bound)
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-init every head's weights; reset stateful norm layers."""
        gain = nn.init.calculate_gain("relu")
        for head in range(self._n_heads):
            for layer in self.layers:
                nn.init.xavier_uniform_(layer.weight[head], gain=gain)
                if layer.bias is not None:
                    nn.init.zeros_(layer.bias[head])
        for norm in self.norms:
            # Bug fix: nn.Identity (normalization="none") has no
            # reset_parameters, so guard instead of crashing.
            if hasattr(norm, "reset_parameters"):
                norm.reset_parameters()

    def forward(self, x):
        x = self.input_drop(x)
        if len(x.shape) == 2:
            # Shared input: add a singleton head axis; MultiHeadLinear
            # broadcasts it across heads.
            x = x.view(-1, 1, x.shape[1])
        if self._residual:
            prev_x = x
        for layer_id, layer in enumerate(self.layers):
            x = layer(x)
            if layer_id < self._n_layers - 1:
                shape = x.shape
                # Norms operate on flattened [N, H*hidden] features.
                x = x.flatten(1, -1)
                x = self.dropout(self.relu(self.norms[layer_id](x)))
                x = x.reshape(shape=shape)
            if self._residual:
                if x.shape[2] == prev_x.shape[2]:
                    x += prev_x
                prev_x = x
        return x
|
from speechrecproj.data import *
# Graph input placeholder for 1-second, 16 kHz waveforms.
# NOTE(review): `tf` is not imported in this module directly -- presumably
# re-exported by the `speechrecproj.data` star import; confirm.
signals = tf.placeholder(tf.float32, [None, 16000])
def main():
    """Smoke-test data loading, then launch the hyper-parameter search."""
    sample_manager = SamplesManager('data')
    print(len(sample_manager.files_labels))
    print(sample_manager.files_labels[0])
    print(Label.all_labels)
    sample_manager.files_labels[0].get_wav()
    tfreader = TFRecordReader(filename='data/train.tfrecord', validation_set_size=6000, batch_size=600)
    wavs, labels = tfreader.next_training_batch()
    # NOTE(review): the name `speechrecproj` is not bound by the star
    # import above; this line may raise NameError unless the package
    # re-exports itself -- verify.
    speechrecproj.experiment.hyper_parameter_search.hyper_parameter_search()
    # with tf.Session() as sess:
    #     result = sess.run(wavs)
    #     print(result)
    # with tf.Session() as sess:
    #     result = sess.run(wavs)
    #     print(result)
    # experiment.hyper_parameter_search.hyper_parameter_search(trainset, valset)
if __name__ == '__main__':
    main()
|
#-*- coding: utf-8 -*-
import os.path
from .base_filter import BaseFilter
class StopwordsFilter(BaseFilter):
    """Token filter backed by a per-country stopword list.

    Loads stopwords/<country>.txt (UTF-8, one word per line) from this
    module's directory.
    """

    def __init__(self, country):
        super(StopwordsFilter, self).__init__()
        self.country = country
        here = os.path.dirname(__file__)
        self.fname = os.path.join(here, 'stopwords', '%s.txt' % self.country)
        with open(self.fname, 'rb') as handle:
            self.stopwords = {
                raw.strip().decode('utf8') for raw in handle if raw
            }

    def predicate(self, tok):
        """Returns True if tok not in stopwords else False"""
        return tok not in self.stopwords
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from endorsement.services import endorsement_services
from endorsement.dao.uwnetid_supported import get_supported_resources_for_netid
from endorsement.dao.user import get_endorser_model
from endorsement.test.dao import TestDao
class TestNetidSupported(TestDao):
    """Exercise get_supported_resources_for_netid against test fixtures."""

    def test_get_supported_netids_for_netid(self):
        endorser = get_endorser_model('jstaff')
        supported = get_supported_resources_for_netid(endorser.netid)
        # Fixture data for 'jstaff' exposes 23 supported resources.
        self.assertEqual(len(supported), 23)
        # Count the distinct netids that at least one service accepts;
        # break so each resource is counted at most once.
        netids = set()
        for s in supported:
            for service in endorsement_services():
                if service.valid_supported_netid(s, endorser):
                    netids.add(s.name)
                    break
        self.assertEqual(len(netids), 16)
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def get_res_net_18(ensemble, **kwargs):
    """Factory for ResNet-18 variants.

    Args:
        ensemble: None for a single model, or one of "early_exit",
            "mc_dropout", "deep" (plain architecture, ensembled by the
            caller) or "depth".
        **kwargs: forwarded to the chosen model constructor.

    Returns:
        The constructed model.

    Raises:
        NotImplementedError: for an unrecognised ensemble name.
    """
    if ensemble is None:
        return ResNet18(**kwargs)
    elif ensemble == "early_exit":
        return ResNet18EarlyExit(**kwargs)
    elif ensemble == "mc_dropout":
        return ResNet18MCDrop(**kwargs)
    elif ensemble == "deep":
        # Deep ensembles reuse the plain architecture.
        return ResNet18(**kwargs)
    elif ensemble == "depth":
        return ResNet18Depth(**kwargs)
    else:
        # Bug fix: the original constructed this exception but never raised
        # it, silently returning None for unknown ensemble names.
        raise NotImplementedError("ensemble not implemented: '{}'".format(ensemble))
def init_weights(model):
    """Kaiming-init every Conv1d; unit-scale/zero-shift every BatchNorm1d."""
    for mod in model.modules():
        if isinstance(mod, nn.Conv1d):
            nn.init.kaiming_normal_(mod.weight, mode="fan_out", nonlinearity="relu")
            continue
        if isinstance(mod, nn.BatchNorm1d):
            nn.init.ones_(mod.weight)
            nn.init.zeros_(mod.bias)
def conv3x3(in_planes, out_planes, stride=1):
    """Bias-free 1-D convolution, kernel size 3, padding 1."""
    return nn.Conv1d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def _conv1x1(in_planes, out_planes, stride=1):
return nn.Conv1d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
    """Two-convolution residual block for the 1-D ResNet."""

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        # Only the first conv may stride; the second keeps resolution.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm1d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm1d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut path: optional projection to match shape.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + shortcut)
class ResNet18(nn.Module):
    """1-D ResNet-18: conv stem -> 4 stages of 2 BasicBlocks -> pooled linear head."""
    name = "res_net_18"
    def __init__(self, out_channels, seed=None):
        super().__init__()
        self.out_channels = out_channels
        self.seed = seed
        # Standard ResNet-18 layout: stage widths, blocks and strides.
        self.hidden_sizes = [64, 128, 256, 512]
        self.layers = [2, 2, 2, 2]
        self.strides = [1, 2, 2, 2]
        self.inplanes = self.hidden_sizes[0]
        # Stem: wide strided conv + BN + ReLU + max-pool (1 input channel).
        in_block = [nn.Conv1d(1, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)]
        in_block += [nn.BatchNorm1d(self.inplanes)]
        in_block += [nn.ReLU(inplace=True)]
        in_block += [nn.MaxPool1d(kernel_size=3, stride=2, padding=1)]
        self.in_block = nn.Sequential(*in_block)
        blocks = []
        for h, l, s in zip(self.hidden_sizes, self.layers, self.strides):
            blocks += [self._make_layer(h, l, s)]
        self.blocks = nn.Sequential(*blocks)
        # Head: global average pool -> flatten -> linear classifier.
        out_block = [nn.AdaptiveAvgPool1d(1)]
        out_block += [nn.Flatten(1)]
        out_block += [nn.Linear(self.hidden_sizes[-1], self.out_channels)]
        self.out_block = nn.Sequential(*out_block)
        # Seed (if given) right before re-initialising so the init is
        # reproducible.
        if self.seed is not None:
            torch.manual_seed(seed)
        self.apply(init_weights)
    def _make_layer(self, planes, blocks, stride=1):
        # Build one stage; the first block downsamples when the stride or
        # width changes, using a 1x1 projection on the shortcut.
        downsample = None
        if stride != 1 or self.inplanes != planes:
            downsample = nn.Sequential(_conv1x1(self.inplanes, planes, stride), nn.BatchNorm1d(planes))
        layers = [BasicBlock(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes
        for _ in range(1, blocks):
            layers += [BasicBlock(self.inplanes, planes)]
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.in_block(x)
        x = self.blocks(x)
        x = self.out_block(x)
        return x
class ExitBlock(nn.Module):
    """Early-exit head: global average pool followed by a 2-layer MLP."""

    def __init__(self, in_channels, hidden_sizes, out_channels):
        super().__init__()
        self.layers = nn.Sequential(
            nn.AdaptiveAvgPool1d(1),
            nn.Flatten(1),
            nn.Linear(in_channels, hidden_sizes),
            nn.ReLU(),
            nn.Linear(hidden_sizes, out_channels),
        )

    def forward(self, x):
        # Collapse the temporal dimension, then classify.
        return self.layers(x)
class ResNet18EarlyExit(ResNet18):
    """ResNet-18 with auxiliary classifiers (early exits) after chosen blocks.

    forward() returns [N, n_exits + 1, out_channels]: the selected early
    exits' logits followed by the final head's logits.
    """
    name = "res_net_18_early_exit"
    def __init__(self, *args, exit_after=-1, complexity_factor=1.2, **kwargs):
        # exit_after: indices of BasicBlocks to attach exits to (-1 = all).
        self.exit_after = exit_after
        self.complexity_factor = complexity_factor
        super().__init__(*args, **kwargs)
        # NOTE(review): hard-coded indices into the self.blocks.modules()
        # traversal that should land on the 8 BasicBlocks -- confirm
        # against the module tree; fragile if the architecture changes.
        to_exit = [2, 8, 15, 24, 31, 40, 47, 56]
        hidden_sizes = len(self.hidden_sizes)  # NOTE(review): unused local
        num_hidden = len(self.hidden_sizes)
        # Earlier exits get wider heads, scaled by complexity_factor.
        exit_hidden_sizes = [int(((self.complexity_factor ** 0.5) ** (num_hidden - idx)) * self.hidden_sizes[-1]) for idx in range(num_hidden)]
        # Duplicate each size: two BasicBlocks (hence two exits) per stage.
        exit_hidden_sizes = [h for pair in zip(exit_hidden_sizes, exit_hidden_sizes) for h in pair]
        if self.exit_after == -1:
            self.exit_after = range(len(to_exit))
        num_exits = len(to_exit)
        if (len(self.exit_after) > num_exits) or not set(self.exit_after).issubset(list(range(num_exits))):
            raise ValueError("valid exit points: {}".format(", ".join(str(n) for n in range(num_exits))))
        self.exit_hidden_sizes = np.array(exit_hidden_sizes)[self.exit_after]
        # Re-collect the BasicBlocks as a flat ModuleList so forward() can
        # tap intermediate activations.
        blocks = []
        for idx, module in enumerate(self.blocks.modules()):
            if idx in to_exit:
                blocks += [module]
        self.blocks = nn.ModuleList(blocks)
        idx = 0
        exit_blocks = []
        for block_idx, block in enumerate(self.blocks):
            if block_idx in self.exit_after:
                in_channels = block.conv1.out_channels
                exit_blocks += [ExitBlock(in_channels, self.exit_hidden_sizes[idx], self.out_channels)]
                idx += 1
        self.exit_blocks = nn.ModuleList(exit_blocks)
        self.apply(init_weights)
    def forward(self, x):
        out = self.in_block(x)
        # Run the backbone, remembering every block's activation.
        out_blocks = []
        for block in self.blocks:
            out = block(out)
            out_blocks += [out]
        # Auxiliary logits from each selected tap point.
        out_exits = []
        for exit_after, exit_block in zip(self.exit_after, self.exit_blocks):
            out = exit_block(out_blocks[exit_after])
            out_exits += [out]
        out = self.out_block(out_blocks[-1])
        # Stack to [N, n_exits + 1, out_channels], final head last.
        out = torch.stack(out_exits + [out], dim=1)
        return out
class MCDropout(nn.Dropout):
    """Dropout that stays stochastic at inference time (Monte-Carlo dropout)."""

    def forward(self, x):
        # Force training=True so units are dropped regardless of module mode.
        return F.dropout(x, p=self.p, training=True, inplace=self.inplace)
class ResNet18MCDrop(ResNet18EarlyExit):
    """ResNet18 with Monte-Carlo dropout inserted after selected blocks.

    Reuses ResNet18EarlyExit only for its exit-point validation and the
    per-block ModuleList rebuild, then strips the exit heads and attaches
    MCDropout modules instead.
    """
    name = "res_net_18_mc_drop"

    def __init__(self, *args, drop_after=-1, drop_prob=0.2, **kwargs):
        # drop_after: block indices to follow with dropout (-1 = all).
        # drop_prob: dropout probability.
        self.drop_after = drop_after
        self.drop_prob = drop_prob
        # Parent validates indices and rebuilds self.blocks as an nn.ModuleList.
        super().__init__(*args, exit_after=drop_after, **kwargs)
        self.drop_after = self.exit_after
        # Drop the early-exit machinery; only the dropout layers remain.
        self.__delattr__("exit_after")
        self.__delattr__("exit_blocks")
        for block_idx in self.drop_after:
            self.blocks[block_idx].add_module("dropout", MCDropout(self.drop_prob))

    def forward(self, x):
        x = self.in_block(x)
        # BUG FIX: self.blocks is an nn.ModuleList (rebuilt by the parent),
        # which is not callable — iterate the blocks instead of self.blocks(x).
        for block in self.blocks:
            x = block(x)
        x = self.out_block(x)
        return x
class ResNet18Depth(ResNet18):
    # ResNet18 truncated to the first `max_depth` residual stages.
    name = "res_net_18_depth"
    def __init__(self, *args, max_depth=1, **kwargs):
        """max_depth: number of residual stages to keep (-1 = keep all)."""
        self.max_depth = max_depth
        super().__init__(*args, **kwargs)
        num_blocks = len(self.hidden_sizes)
        if self.max_depth == -1:
            self.max_depth = len(self.hidden_sizes)
        elif (max_depth > num_blocks) or (max_depth < 1):
            raise ValueError("valid depths: {}".format(", ".join(str(n) for n in range(1, num_blocks + 1))))
        # Keep only the first max_depth stages.
        self.blocks = self.blocks[:self.max_depth]
        # Rebuild the head so the Linear layer matches the width of the
        # last remaining stage (the parent sized it for the final stage).
        out_block = [nn.AdaptiveAvgPool1d(1)]
        out_block += [nn.Flatten(1)]
        out_block += [nn.Linear(self.hidden_sizes[self.max_depth - 1], self.out_channels)]
        self.out_block = nn.Sequential(*out_block)
##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from help@hdfgroup.org. #
##############################################################################
import unittest
import json
import helper
import config
class QueryTest(unittest.TestCase):
    """Integration tests for HSDS dataset value queries (GET/PUT with
    a `query` request parameter) against a live server endpoint."""

    def __init__(self, *args, **kwargs):
        super(QueryTest, self).__init__(*args, **kwargs)
        # One shared test domain for all tests in this class.
        self.base_domain = helper.getTestDomainName(self.__class__.__name__)
        helper.setupDomain(self.base_domain)
        self.endpoint = helper.getEndpoint()

    def setUp(self):
        self.session = helper.getSession()

    def tearDown(self):
        if self.session:
            self.session.close()

    # main

    def testSimpleQuery(self):
        # Test query value for 1d dataset
        print("testSimpleQuery", self.base_domain)
        headers = helper.getRequestHeaders(domain=self.base_domain)
        req = self.endpoint + '/'
        # Get root uuid
        rsp = self.session.get(req, headers=headers)
        self.assertEqual(rsp.status_code, 200)
        rspJson = json.loads(rsp.text)
        root_uuid = rspJson["root"]
        helper.validateId(root_uuid)
        #
        # create 1d dataset (compound type: symbol, date, open, close)
        #
        fixed_str4_type = {"charSet": "H5T_CSET_ASCII",
                           "class": "H5T_STRING",
                           "length": 4,
                           "strPad": "H5T_STR_NULLPAD" }
        fixed_str8_type = {"charSet": "H5T_CSET_ASCII",
                           "class": "H5T_STRING",
                           "length": 8,
                           "strPad": "H5T_STR_NULLPAD" }
        fields = ( {'name': 'symbol', 'type': fixed_str4_type},
                   {'name': 'date', 'type': fixed_str8_type},
                   {'name': 'open', 'type': 'H5T_STD_I32LE'},
                   {'name': 'close', 'type': 'H5T_STD_I32LE'} )
        datatype = {'class': 'H5T_COMPOUND', 'fields': fields }
        num_elements = 12
        payload = {'type': datatype, 'shape': num_elements}
        req = self.endpoint + "/datasets"
        rsp = self.session.post(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 201)  # create dataset
        rspJson = json.loads(rsp.text)
        dset_uuid = rspJson['id']
        self.assertTrue(helper.validateId(dset_uuid))
        # link new dataset as 'dset1'
        name = 'dset'
        req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
        payload = {"id": dset_uuid}
        rsp = self.session.put(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 201)
        # write entire array
        value = [
            ("EBAY", "20170102", 3023, 3088),
            ("AAPL", "20170102", 3054, 2933),
            ("AMZN", "20170102", 2973, 3011),
            ("EBAY", "20170103", 3042, 3128),
            ("AAPL", "20170103", 3182, 3034),
            ("AMZN", "20170103", 3021, 2788),
            ("EBAY", "20170104", 2798, 2876),
            ("AAPL", "20170104", 2834, 2867),
            ("AMZN", "20170104", 2891, 2978),
            ("EBAY", "20170105", 2973, 2962),
            ("AAPL", "20170105", 2934, 3010),
            ("AMZN", "20170105", 3018, 3086)
        ]
        payload = {'value': value}
        req = self.endpoint + "/datasets/" + dset_uuid + "/value"
        rsp = self.session.put(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 200)  # write value
        # read first row with AAPL
        params = {'query': "symbol == b'AAPL'" }
        params["Limit"] = 1
        rsp = self.session.get(req, params=params, headers=headers)
        self.assertEqual(rsp.status_code, 200)
        rspJson = json.loads(rsp.text)
        #self.assertTrue("hrefs" in rspJson)
        self.assertTrue("value" in rspJson)
        self.assertTrue("index" in rspJson)
        readData = rspJson["value"]
        self.assertEqual(len(readData), 1)
        item = readData[0]
        self.assertEqual(item, ["AAPL", "20170102", 3054, 2933])
        self.assertEqual(item[0], "AAPL")
        # "index" holds the element offsets of the matching rows
        indices = rspJson["index"]
        self.assertEqual(indices, [1])
        del params["Limit"]
        # get back rows for AAPL
        rsp = self.session.get(req, params=params, headers=headers)
        self.assertEqual(rsp.status_code, 200)
        rspJson = json.loads(rsp.text)
        self.assertTrue("hrefs" in rspJson)
        self.assertTrue("value" in rspJson)
        self.assertTrue("index" in rspJson)
        readData = rspJson["value"]
        self.assertEqual(len(readData), 4)
        for item in readData:
            self.assertEqual(item[0], "AAPL")
        indices = rspJson["index"]
        self.assertEqual(indices, [1,4,7,10])
        # combine with a selection (query applies only inside the slice)
        params["select"] = "[2:12]"
        rsp = self.session.get(req, params=params, headers=headers)
        self.assertEqual(rsp.status_code, 200)
        rspJson = json.loads(rsp.text)
        #self.assertTrue("hrefs" in rspJson)
        self.assertTrue("value" in rspJson)
        self.assertTrue("index" in rspJson)
        readData = rspJson["value"]
        self.assertEqual(len(readData), 3)
        for item in readData:
            self.assertEqual(item[0], "AAPL")
        indices = rspJson["index"]
        self.assertEqual(indices, [4,7,10])
        # combine with Limit
        params["Limit"] = 2
        rsp = self.session.get(req, params=params, headers=headers)
        self.assertEqual(rsp.status_code, 200)
        rspJson = json.loads(rsp.text)
        #self.assertTrue("hrefs" in rspJson)
        self.assertTrue("value" in rspJson)
        self.assertTrue("index" in rspJson)
        readData = rspJson["value"]
        self.assertEqual(len(readData), 2)
        for item in readData:
            self.assertEqual(item[0], "AAPL")
        indices = rspJson["index"]
        self.assertEqual(indices, [4,7])
        # try bad Limit (non-numeric) - expect 400
        params["Limit"] = "abc"
        rsp = self.session.get(req, params=params, headers=headers)
        self.assertEqual(rsp.status_code, 400)
        # try invalid query string - expect 400
        params = {'query': "foobar" }
        rsp = self.session.get(req, params=params, headers=headers)
        self.assertEqual(rsp.status_code, 400)

    def testChunkedRefIndirectDataset(self):
        """Query a dataset whose chunks live in an external HDF5 file in S3,
        located via an indirect chunk-info table."""
        print("testChunkedRefIndirectDatasetQuery", self.base_domain)
        headers = helper.getRequestHeaders(domain=self.base_domain)
        hdf5_sample_bucket = config.get("hdf5_sample_bucket")
        if not hdf5_sample_bucket:
            print("hdf5_sample_bucket config not set, skipping testChunkedRefIndirectDataset")
            return
        s3path = "s3://" + hdf5_sample_bucket + "/data/hdf5test" + "/snp500.h5"
        SNP500_ROWS = 3207353
        snp500_json = helper.getHDF5JSON("snp500.json")
        if not snp500_json:
            print("snp500.json file not found, skipping testChunkedRefDataset")
            return
        if "snp500.h5" not in snp500_json:
            self.assertTrue(False)
        chunk_dims = [60000,]  # chunk layout used in snp500.h5 file
        num_chunks = (SNP500_ROWS // chunk_dims[0]) + 1
        chunk_info = snp500_json["snp500.h5"]
        dset_info = chunk_info["/dset"]
        if "byteStreams" not in dset_info:
            self.assertTrue(False)
        byteStreams = dset_info["byteStreams"]
        self.assertEqual(len(byteStreams), num_chunks)
        # (offset, size) per chunk, indexed by chunk number
        chunkinfo_data = [(0,0)]*num_chunks
        # fill the numpy array with info from bytestreams data
        for i in range(num_chunks):
            item = byteStreams[i]
            index = item["index"]
            chunkinfo_data[index] = (item["file_offset"], item["size"])
        # get domain
        req = helper.getEndpoint() + '/'
        rsp = self.session.get(req, headers=headers)
        rspJson = json.loads(rsp.text)
        self.assertTrue("root" in rspJson)
        root_uuid = rspJson["root"]
        # create table to hold chunkinfo
        # create a dataset to store chunk info
        fields = ({'name': 'offset', 'type': 'H5T_STD_I64LE'},
                  {'name': 'size', 'type': 'H5T_STD_I32LE'})
        chunkinfo_type = {'class': 'H5T_COMPOUND', 'fields': fields }
        req = self.endpoint + "/datasets"
        # Store 40 chunk locations
        chunkinfo_dims = [num_chunks,]
        payload = {'type': chunkinfo_type, 'shape': chunkinfo_dims }
        req = self.endpoint + "/datasets"
        rsp = self.session.post(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 201)  # create dataset
        rspJson = json.loads(rsp.text)
        chunkinfo_uuid = rspJson['id']
        self.assertTrue(helper.validateId(chunkinfo_uuid))
        # link new dataset as 'chunks'
        name = "chunks"
        req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
        payload = {"id": chunkinfo_uuid}
        rsp = self.session.put(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 201)
        # write to the chunkinfo dataset
        payload = {'value': chunkinfo_data}
        req = self.endpoint + "/datasets/" + chunkinfo_uuid + "/value"
        rsp = self.session.put(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 200)  # write value
        # define types we need
        s10_type = {"charSet": "H5T_CSET_ASCII",
                    "class": "H5T_STRING",
                    "length": 10,
                    "strPad": "H5T_STR_NULLPAD" }
        s4_type = {"charSet": "H5T_CSET_ASCII",
                   "class": "H5T_STRING",
                   "length": 4,
                   "strPad": "H5T_STR_NULLPAD" }
        fields = ({'name': 'date', 'type': s10_type},
                  {'name': 'symbol', 'type': s4_type},
                  {'name': 'sector', 'type': 'H5T_STD_I8LE'},
                  {'name': 'open', 'type': 'H5T_IEEE_F32LE'},
                  {'name': 'high', 'type': 'H5T_IEEE_F32LE'},
                  {'name': 'low', 'type': 'H5T_IEEE_F32LE'},
                  {'name': 'volume', 'type': 'H5T_IEEE_F32LE'},
                  {'name': 'close', 'type': 'H5T_IEEE_F32LE'})
        datatype = {'class': 'H5T_COMPOUND', 'fields': fields }
        data = { "type": datatype, "shape": [SNP500_ROWS,] }
        # chunks are resolved via the chunk_table dataset created above
        layout = {"class": 'H5D_CHUNKED_REF_INDIRECT', "file_uri": s3path, "dims": chunk_dims, "chunk_table": chunkinfo_uuid}
        data['creationProperties'] = {'layout': layout}
        req = self.endpoint + '/datasets'
        rsp = self.session.post(req, data=json.dumps(data), headers=headers)
        self.assertEqual(rsp.status_code, 201)
        rspJson = json.loads(rsp.text)
        dset_id = rspJson["id"]
        self.assertTrue(helper.validateId(dset_id))
        # link new dataset as 'dset'
        name = "dset"
        req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
        payload = {"id": dset_id}
        rsp = self.session.put(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 201)
        # read a selection
        req = self.endpoint + "/datasets/" + dset_id + "/value"
        params = {'query': "symbol == b'AAPL'" }  # query for AAPL
        #params = {'query': "symbol == b'CVX'" } # query for CVX
        #params["select"] = "[0:100]"
        params["nonstrict"] = 1  # enable SN to invoke lambda func
        rsp = self.session.get(req, params=params, headers=headers)
        if rsp.status_code == 404:
            print("s3object: {} not found, skipping hyperslab read chunk reference indirect test".format(s3path))
            return
        self.assertEqual(rsp.status_code, 200)
        rspJson = json.loads(rsp.text)
        #self.assertTrue("hrefs" in rspJson)
        self.assertTrue("value" in rspJson)
        self.assertTrue("index" in rspJson)
        readData = rspJson["value"]
        # expected match counts/values for the known snp500.h5 sample file
        self.assertEqual(len(readData), 8813)
        item = readData[0]
        self.assertEqual(item[0], "1980.12.12")
        self.assertEqual(item[1], "AAPL")
        indices = rspJson["index"]
        self.assertEqual(len(indices), 8813)
        self.assertEqual(indices[0], 128912)

    def testPutQuery(self):
        # Test PUT query for 1d dataset
        print("testPutQuery", self.base_domain)
        headers = helper.getRequestHeaders(domain=self.base_domain)
        req = self.endpoint + '/'
        # Get root uuid
        rsp = self.session.get(req, headers=headers)
        self.assertEqual(rsp.status_code, 200)
        rspJson = json.loads(rsp.text)
        root_uuid = rspJson["root"]
        helper.validateId(root_uuid)
        #
        # create 1d dataset (same compound layout as testSimpleQuery)
        #
        fixed_str4_type = {"charSet": "H5T_CSET_ASCII",
                           "class": "H5T_STRING",
                           "length": 4,
                           "strPad": "H5T_STR_NULLPAD" }
        fixed_str8_type = {"charSet": "H5T_CSET_ASCII",
                           "class": "H5T_STRING",
                           "length": 8,
                           "strPad": "H5T_STR_NULLPAD" }
        fields = ( {'name': 'symbol', 'type': fixed_str4_type},
                   {'name': 'date', 'type': fixed_str8_type},
                   {'name': 'open', 'type': 'H5T_STD_I32LE'},
                   {'name': 'close', 'type': 'H5T_STD_I32LE'} )
        datatype = {'class': 'H5T_COMPOUND', 'fields': fields }
        num_elements = 12
        payload = {'type': datatype, 'shape': num_elements}
        req = self.endpoint + "/datasets"
        rsp = self.session.post(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 201)  # create dataset
        rspJson = json.loads(rsp.text)
        dset_uuid = rspJson['id']
        self.assertTrue(helper.validateId(dset_uuid))
        # link new dataset as 'dset1'
        name = 'dset'
        req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
        payload = {"id": dset_uuid}
        rsp = self.session.put(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 201)
        # write entire array
        value = [
            ("EBAY", "20170102", 3023, 3088),
            ("AAPL", "20170102", 3054, 2933),
            ("AMZN", "20170102", 2973, 3011),
            ("EBAY", "20170103", 3042, 3128),
            ("AAPL", "20170103", 3182, 3034),
            ("AMZN", "20170103", 3021, 2788),
            ("EBAY", "20170104", 2798, 2876),
            ("AAPL", "20170104", 2834, 2867),
            ("AMZN", "20170104", 2891, 2978),
            ("EBAY", "20170105", 2973, 2962),
            ("AAPL", "20170105", 2934, 3010),
            ("AMZN", "20170105", 3018, 3086)
        ]
        payload = {'value': value}
        req = self.endpoint + "/datasets/" + dset_uuid + "/value"
        rsp = self.session.put(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 200)  # write value
        # set any rows with AAPL to have open of 999
        params = {'query': "symbol == b'AAPL'" }
        update_value = {"open": 999}
        payload = {'value': update_value}
        rsp = self.session.put(req, params=params, data=json.dumps(update_value), headers=headers)
        self.assertEqual(rsp.status_code, 200)
        rspJson = json.loads(rsp.text)
        self.assertTrue("hrefs" in rspJson)
        self.assertTrue("value" in rspJson)
        self.assertTrue("index" in rspJson)
        readData = rspJson["value"]
        self.assertEqual(len(readData), 4)
        for item in readData:
            self.assertEqual(item[0], "AAPL")
        indices = rspJson["index"]
        self.assertEqual(indices, [1,4,7,10])
        # read values and verify the expected changes where made
        req = self.endpoint + "/datasets/" + dset_uuid + "/value"
        rsp = self.session.get(req, headers=headers)
        self.assertEqual(rsp.status_code, 200)
        rspJson = json.loads(rsp.text)
        read_values = rspJson["value"]
        self.assertEqual(len(read_values), len(value))
        for i in range(len(value)):
            orig_item = value[i]
            mod_item = read_values[i]
            self.assertEqual(orig_item[0], mod_item[0])
            self.assertEqual(orig_item[1], mod_item[1])
            self.assertEqual(orig_item[3], mod_item[3])
            if orig_item[0] == "AAPL":
                self.assertEqual(mod_item[2], 999)
            else:
                self.assertEqual(orig_item[2], mod_item[2])
        # re-write values
        payload = {'value': value}
        req = self.endpoint + "/datasets/" + dset_uuid + "/value"
        rsp = self.session.put(req, data=json.dumps(payload), headers=headers)
        self.assertEqual(rsp.status_code, 200)  # write value
        # set just the first row with AAPL to have open of 999 (Limit=1)
        params = {'query': "symbol == b'AAPL'" }
        params["Limit"] = 1
        update_value = {"open": 999}
        payload = {'value': update_value}
        rsp = self.session.put(req, params=params, data=json.dumps(update_value), headers=headers)
        self.assertEqual(rsp.status_code, 200)
        rspJson = json.loads(rsp.text)
        self.assertTrue("hrefs" in rspJson)
        self.assertTrue("value" in rspJson)
        self.assertTrue("index" in rspJson)
        readData = rspJson["value"]
        self.assertEqual(len(readData), 1)
        for item in readData:
            self.assertEqual(item[0], "AAPL")
        indices = rspJson["index"]
        self.assertEqual(indices, [1])
        # read values and verify the expected changes where made
        req = self.endpoint + "/datasets/" + dset_uuid + "/value"
        rsp = self.session.get(req, headers=headers)
        self.assertEqual(rsp.status_code, 200)
        rspJson = json.loads(rsp.text)
        read_values = rspJson["value"]
        self.assertEqual(len(read_values), len(value))
        for i in range(len(value)):
            orig_item = value[i]
            mod_item = read_values[i]
            self.assertEqual(orig_item[0], mod_item[0])
            self.assertEqual(orig_item[1], mod_item[1])
            self.assertEqual(orig_item[3], mod_item[3])
            if orig_item[0] == "AAPL" and i == 1:
                self.assertEqual(mod_item[2], 999)
            else:
                self.assertEqual(orig_item[2], mod_item[2])
if __name__ == '__main__':
    # setup test files
    unittest.main()
|
from bokeh.io import show, export_png
from bokeh.plotting import figure
from bokeh.layouts import row
import numpy as np
# Draw three bivariate-normal samples with different correlations and plot
# them side by side, exporting the figure row to a PNG.
plot_specs = [
    ('r=.8', [1, 3], np.array([[1, .8], [.8, 1]])),
    ('r=.4', [-1, 2], np.array([[1, .4], [.4, 1]])),
    ('r=-.8', [0, 0], np.array([[1, -.8], [-.8, 1]])),
]
figures = []
for plot_title, mean, cov in plot_specs:
    # Draws occur in the same order as before, preserving the RNG stream.
    samples = np.random.multivariate_normal(mean, cov=cov, size=100)
    fig = figure(title=plot_title, width=300, height=300)
    fig.xaxis.axis_label = 'x'
    fig.yaxis.axis_label = 'y'
    fig.scatter(x=samples[:, 0], y=samples[:, 1])
    figures.append(fig)
export_png(row(*figures), filename='../img/correlation.png')
show(row(*figures))
|
# Interactive menu: read two integers and report their ordering, or exit.
while True:
    print("-" * 30)
    print("1.请您任意输入两个数字")
    print("2.退出系统")
    try:
        key = int(input("请选择功能(输入序号1到2):"))
    except ValueError:
        print("您的输入有误,请输入序号1到2")
        continue
    if key == 1:  # read and compare the two numbers
        try:
            num = int(input("请输入您猜的第一个数字:"))
            nam = int(input("请输入您猜的第二个数字:"))
        except ValueError:
            print("输入错误,输入数字而不是字符,请重试")
            # BUG FIX: without this `continue`, the comparisons below ran on
            # stale values (initially empty lists, raising TypeError).
            continue
        if num > nam:
            print("第一个数字大于第二个数字")
        elif num < nam:
            print("第一个数字小于第二个数字")
        else:
            print("第一个数字等于第二个数字")
    elif key == 2:
        quitConfirm = input("确认要退出本系统吗(Y或者N)?")
        if quitConfirm.upper() == "Y":
            print("欢迎使用本系统,谢谢!")
            break
    else:
        # NOTE(review): original indentation was lost; this branch is taken
        # for menu numbers other than 1/2 — confirm against original intent.
        print("您输入有误,请重新输入")
import tensorflow as tf
from tensorflow.contrib import slim
from .base_model import BaseModel, Mode
from .backbones import resnet_v1 as resnet
from .layers import delf_attention, image_normalization, dimensionality_reduction
class Delf(BaseModel):
    """DELF descriptor model: ResNet-50 backbone with optional attention
    pooling and dimensionality reduction. Inference only (no training)."""
    input_spec = {
        'image': {'shape': [None, None, None, None], 'type': tf.float32}
    }
    required_config_keys = []
    default_config = {
        'normalize_input': False,
        'use_attention': False,
        'attention_kernel': 1,
        'normalize_feature_map': True,
        'normalize_average': True,
        'dimensionality_reduction': None,  # int target dim, or None to skip
        'proj_regularizer': 0.,
    }
    @staticmethod
    def tower(image, mode, config):
        """Compute a global descriptor for a batch of images.

        NOTE(review): normalization is applied unconditionally here even
        though the config exposes a 'normalize_input' flag — confirm.
        """
        image = image_normalization(image)
        # Grayscale inputs are tiled to 3 channels for the RGB backbone.
        if image.shape[-1] == 1:
            image = tf.tile(image, [1, 1, 1, 3])
        with slim.arg_scope(resnet.resnet_arg_scope()):
            is_training = config['train_backbone'] and (mode == Mode.TRAIN)
            # Freeze conv/BN layers unless the backbone is being trained.
            with slim.arg_scope([slim.conv2d, slim.batch_norm], trainable=is_training):
                _, encoder = resnet.resnet_v1_50(image,
                                                 is_training=is_training,
                                                 global_pool=False,
                                                 scope='resnet_v1_50')
        # Use the block3 activations as the local feature map.
        feature_map = encoder['resnet_v1_50/block3']
        if config['use_attention']:
            # Attention-weighted pooling of the feature map.
            descriptor = delf_attention(feature_map, config, mode == Mode.TRAIN,
                                        resnet.resnet_arg_scope())
        else:
            # Plain global max-pool over the spatial dimensions.
            descriptor = tf.reduce_max(feature_map, [1, 2])
        if config['dimensionality_reduction']:
            descriptor = dimensionality_reduction(descriptor, config)
        return descriptor
    def _model(self, inputs, mode, **config):
        # This model does not support training
        config['train_backbone'] = False
        config['train_attention'] = False
        return {'descriptor': self.tower(inputs['image'], mode, config)}
    def _loss(self, outputs, inputs, **config):
        raise NotImplementedError
    def _metrics(self, outputs, inputs, **config):
        raise NotImplementedError
|
"""
stuff for testing Phenny modules
example:
if __name__ == '__main__':
import sys
sys.path.extend(('.','..')) # so we can find phennytest
from phennytest import PhennyFake, CommandInputFake
PHENNYFAKE = PhennyFake()
CMDFAKE = CommandInputFake('.wub a dub dub')
wub(PHENNYFAKE, CMDFAKE)
"""
import sys, re
class ConfigFake():
    " fakes the module created by importing ~/.phenny/phenny.conf "
    def __init__(self):
        self.nick = 'botnick'
        self.name = 'Phenny Palmersbot,'
        self.channels = ['#test']
        self.password = None
        self.owner = 'botowner'
        # owner is always an admin
        self.admins = [self.owner, 'botadmin']
        # command prefix, as a regex fragment
        self.prefix = r'\.'
class PhennyFake(object):
    " use in place of PhennyWrapped "
    # All output methods print to stdout/stderr instead of talking to IRC,
    # so module handlers can be exercised from the command line.
    # (This file is Python 2: note the print statements and `unicode` below.)
    @staticmethod
    def say(mmm):
        # channel output
        print mmm
    @staticmethod
    def reply(mmm):
        # like say(), but addressed to a nick
        PhennyFake.say("nick: "+mmm)
    @staticmethod
    def write(args, trailing=None):
        # render a raw IRC command line
        mesg = ': '+' '.join(args)
        if trailing:
            mesg += ' :'+trailing
        print mesg
    @staticmethod
    def msg(destination, text):
        PhennyFake.write(('PRIVMSG', destination), text)
    @staticmethod
    def log(mesg):
        sys.stderr.write(mesg)
    def __init__(self, config=None):
        # Fall back to a ConfigFake and guarantee a command prefix is set.
        if not config:
            config = ConfigFake()
        if not hasattr(config,'prefix') or not config.prefix :
            config.prefix = r'\.'
        self.config = config
        self.doc = {}
        self.stats = {}
        self.variables = {}
        self.stack = list()
class CommandInputFake(unicode):
    " use in place of CommandInput "
    # A unicode subclass (Python 2) carrying the extra attributes that
    # phenny attaches to incoming command lines.
    def __new__(cls, text):
        # Matches ".cmd args" / "!cmd args"; group 1 = command, group 2 = args.
        cmd_re = r'^[\.\!](\S+)(?: +(.*))?$'
        cif = unicode.__new__(cls, text)
        cif.sender = '#test'
        cif.nick = 'self'
        cif.admin = False
        cif.event = 'PRIVMSG'
        cif.bytes = ':'+text
        # NOTE(review): if `text` does not match cmd_re, match is None and the
        # two attribute reads below raise AttributeError.
        cif.match = re.match(cmd_re, text)
        cif.groups = cif.match.groups
        cif.group = cif.match.group
        cif.args = ()
        return cif
|
from .models import Slot, Service, Booking
from django.contrib import admin
# Register your models here.
@admin.register(Booking)
class BookingAdmin(admin.ModelAdmin):
    # Admin list view for bookings.
    list_display = ["user", "slot", "start", "end"]
    # NOTE(review): search_fields entries that are relations (e.g. "user",
    # "slot") normally need a related lookup such as "user__username";
    # confirm these names resolve to searchable text fields on Booking.
    search_fields = ["user", "slot", "service", "school"]
|
try:
    from setuptools import setup, find_packages
except ImportError:
    # Fallback for environments without setuptools. distutils provides no
    # find_packages, so supply a minimal stand-in — previously this branch
    # left find_packages undefined and the setup() call below crashed.
    import os
    from distutils.core import setup

    def find_packages(where='.'):
        """Minimal find_packages: every directory containing __init__.py."""
        found = []
        for root, _dirs, files in os.walk(where):
            if '__init__.py' in files:
                found.append(os.path.relpath(root, where).replace(os.sep, '.'))
        return found

setup(
    name='pythontabcmd2',
    url='https://github.com/tableau/tabcmd2',
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            'tabcmd2 = pythontabcmd2.tabcmd2:main'
        ]
    },
    test_suite='tests',
    install_requires=[
        'requests>=2.11,<3.0',
        'urllib3>=1.24.3,<2.0',
        'tableauserverclient>=0.12'
    ],
    tests_require=[
        'requests-mock>=1.0,<2.0',
        'pytest',
        'mock'
    ]
)
# mira.py
# -------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# Mira implementation
import util
PRINT = True
class MiraClassifier:
    """
    Mira classifier.
    Note that the variable 'datum' in this code refers to a counter of features
    (not to a raw samples.Datum).
    """
    def __init__( self, legalLabels, max_iterations):
        self.legalLabels = legalLabels
        self.type = "mira"
        self.automaticTuning = False
        self.C = 0.001  # default aggressiveness cap when not auto-tuning
        self.max_iterations = max_iterations
        self.initializeWeightsToZero()

    def initializeWeightsToZero(self):
        "Resets the weights of each label to zero vectors"
        self.weights = {}
        for label in self.legalLabels:
            self.weights[label] = util.Counter()  # this is the data-structure you should use

    def train(self, trainingData, trainingLabels, validationData, validationLabels):
        "Outside shell to call your method. Do not modify this method."
        self.features = trainingData[0].keys()  # this could be useful for your code later...
        if (self.automaticTuning):
            Cgrid = [0.002, 0.004, 0.008]
        else:
            Cgrid = [self.C]
        return self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, Cgrid)

    def trainAndTune(self, trainingData, trainingLabels, validationData, validationLabels, Cgrid):
        """
        Train with MIRA for each value of C in Cgrid and keep the weights that
        score best on the validation data (ties go to the earlier C).

        Fixes relative to the previous version:
        - the best-scoring label is now a true argmax (the old
          `or bestScore < 0` let any later label overwrite a negative best);
        - the step size follows the MIRA formula
          tau = min(C, ((w_guess - w_true) . f + 1) / (2 * (f . f)))
          (the old code computed `diff . f + 0.5 * (f . f)` with C hard-coded);
        - Cgrid and the validation set are actually used, as documented.
        """
        bestWeights = None
        bestAccuracy = -1
        for c in Cgrid:
            # Train each candidate C from freshly zeroed weights.
            self.initializeWeightsToZero()
            for _ in range(self.max_iterations):
                for datum, actualLabel in zip(trainingData, trainingLabels):
                    guess = self._highestScoringLabel(datum)
                    if guess != actualLabel:
                        data = datum.copy()
                        norm = 2.0 * (data * data)
                        if norm == 0:
                            continue  # zero feature vector carries no signal
                        tau = min(c, ((self.weights[guess] - self.weights[actualLabel]) * data + 1.0) / norm)
                        # Scale the feature vector by tau, then nudge weights.
                        for feature in data:
                            data[feature] = data[feature] * tau
                        self.weights[actualLabel] = self.weights[actualLabel] + data  # under-predicted
                        self.weights[guess] = self.weights[guess] - data  # over-predicted
            # Score this C on the validation set.
            guesses = self.classify(validationData)
            correct = sum(int(g == lbl) for g, lbl in zip(guesses, validationLabels))
            if correct > bestAccuracy:
                bestAccuracy = correct
                bestWeights = {label: self.weights[label].copy() for label in self.legalLabels}
        if bestWeights is not None:
            self.weights = bestWeights

    def _highestScoringLabel(self, datum):
        """Return the label whose weight vector scores `datum` highest
        (first label wins ties)."""
        bestScore = None
        bestLabel = None
        for label in self.legalLabels:
            score = datum * self.weights[label]
            if bestScore is None or score > bestScore:
                bestScore = score
                bestLabel = label
        return bestLabel

    def classify(self, data ):
        """
        Classifies each datum as the label that most closely matches the prototype vector
        for that label. See the project description for details.
        Recall that a datum is a util.counter...
        """
        guesses = []
        for datum in data:
            vectors = util.Counter()
            for l in self.legalLabels:
                vectors[l] = self.weights[l] * datum
            guesses.append(vectors.argMax())
        return guesses
|
import json
import os
from datetime import datetime
from .config import config
class Entry():
    """Loads entry json file, and presents a simple api to access and
    save fields"""

    def __init__(self):
        # New entries are stamped with the current local time.
        d = datetime.now()
        self.datetime = {
            "year": d.year,
            "month": d.month,
            "day": d.day,
            "hour": d.hour,
            "minute": d.minute,
            "second": d.second,
        }
        self.tags = []
        self.title = ""
        self.text = ""
        self.__file_name = ""  # generated lazily by the file_name property

    @classmethod
    def load(cls, file_name, directory=None):
        """Loads an entry json and returns an Entry object.

        `directory` defaults to config.directory, resolved at call time.
        (Previously the default was bound once at class-definition time, so
        later changes to config.directory were silently ignored.)
        """
        if directory is None:
            directory = config.directory
        with open(f"{directory}/{file_name}", "r") as f:
            data = json.load(f)
        entry = cls()
        entry.datetime = data["datetime"]
        entry.tags = data["tags"]
        entry.title = data["title"]
        entry.text = data["text"]
        entry.__file_name = file_name
        return entry

    def save(self):
        """Serialize this entry to <config.directory>/<file_name> as JSON."""
        data = {
            "datetime": self.datetime,
            "tags": self.tags,
            "title": self.title,
            "text": self.text
        }
        file_name = "{}/{}".format(config.directory, self.file_name)
        with open(file_name, "w") as f:
            json.dump(data, f, ensure_ascii=True)

    def delete(self):
        """Remove this entry's file from the configured directory."""
        os.remove("{}/{}".format(config.directory, self.file_name))

    ###########################################################################
    @property
    def file_name(self):
        """Generates or loads the filename to be used when saving
        """
        if not self.__file_name:
            date = self.str_date
            time = self.str_time.replace(":", "-")  # ':' is illegal in filenames
            self.__file_name = "{} - {}.json".format(date, time)
        return self.__file_name

    @property
    def str_date(self):
        # zero-padded ISO-style date, e.g. "2020-03-07"
        return "{year}-{month:0>2}-{day:0>2}".format(**self.datetime)

    @property
    def str_time(self):
        # zero-padded "HH:MM:SS"
        return "{hour:0>2}:{minute:0>2}:{second:0>2}".format(**self.datetime)

    @property
    def str_time_short(self):
        # zero-padded "HH:MM"
        return "{hour:0>2}:{minute:0>2}".format(**self.datetime)

    def __str__(self):
        if self.title:
            return "{} {} {}".format(self.str_date, self.str_time_short, self.title)
        else:
            return "{} {}".format(self.str_date, self.str_time_short)

    ###########################################################################
    @staticmethod
    def sort_key(entry):
        """To be used in sorted(list, key=Entry.sort_key)"""
        return "{}T{}-{}".format(entry.str_date, entry.str_time, entry.title)
|
from django.conf.urls import patterns, include, url
from store_search import views
# Route /search to the store_search view.
# NOTE(review): this file imports `patterns` (removed in Django 1.10) and the
# old-style `url()` helper; on modern Django use `re_path`/`path` instead.
urlpatterns = (
    url(r'^search$', views.search_page, name='store_search'),
)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from optparse import make_option
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext_lazy as _
class Command(BaseCommand):
    # Management command that tallies shop customers by state and can
    # optionally purge those whose sessions have expired.
    help = _("Collect information about all customers which accessed this shop.")
    # Old-style option declaration (option_list/make_option were removed in
    # Django 1.10; newer Django uses add_arguments()).
    option_list = BaseCommand.option_list + (
        make_option("--delete-expired", action='store_true', dest='delete_expired',
            help=_("Delete customers with expired sessions.")),
    )
    def handle(self, verbosity, delete_expired, *args, **options):
        """Iterate all customers, count them per category, print a summary."""
        # Imported here so the command can be loaded without the app registry.
        from shop.models.customer import CustomerModel
        data = dict(total=0, anonymous=0, active=0, staff=0, guests=0, registered=0, expired=0)
        for customer in CustomerModel.objects.iterator():
            data['total'] += 1
            if customer.user.is_active:
                data['active'] += 1
            if customer.user.is_staff:
                data['staff'] += 1
            # registered / guest / anonymous are mutually exclusive states
            if customer.is_registered():
                data['registered'] += 1
            elif customer.is_guest():
                data['guests'] += 1
            elif customer.is_anonymous():
                data['anonymous'] += 1
                # NOTE(review): nesting reconstructed from flattened source —
                # expiry assumed to apply to anonymous (session-bound)
                # customers only; confirm against the original file.
                if customer.is_expired():
                    data['expired'] += 1
                    if delete_expired:
                        customer.delete()
        msg = _("Customers in this shop: total={total}, anonymous={anonymous}, expired={expired}, active={active}, guests={guests}, registered={registered}, staff={staff}.")
        self.stdout.write(msg.format(**data))
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Slight modifications to the D4RL loader to enable obtaining next_actions if
desired.
"""
import collections
import numpy as np
def qlearning_dataset(
    env,
    dataset=None,
    terminate_on_end=False,
    include_next_actions=False,
    **kwargs):
  """Format a D4RL dataset for standard Q-learning algorithms.

  Args:
    env: An OfflineEnv object.
    dataset: An optional dataset to pass in for processing. If None,
      the dataset will default to env.get_dataset().
    terminate_on_end (bool): Set done=True on the last timestep in a
      trajectory. Default is False, which discards the last timestep of
      each trajectory instead.
    include_next_actions (bool): Whether to also return next actions,
      useful for policy evaluation of the data-generating policy. The
      next action at the final timestep of an episode is a copy of the
      last action.
    **kwargs: Arguments to pass to env.get_dataset().

  Returns:
    A dict with keys 'observations', 'actions', 'next_observations',
    'rewards', 'terminals' and, when include_next_actions is True,
    'next_actions'; each an N-length numpy array.
  """
  if dataset is None:
    dataset = env.get_dataset(**kwargs)

  num_samples = dataset['rewards'].shape[0]
  # Newer datasets carry an explicit 'timeouts' field; fall back to counting
  # steps against env._max_episode_steps for older ones.
  use_timeouts = 'timeouts' in dataset

  buffers = {key: [] for key in
             ('observations', 'actions', 'next_observations', 'rewards',
              'terminals', 'next_actions')}

  episode_step = 0
  for t in range(num_samples - 1):
    obs = dataset['observations'][t].astype(np.float32)
    next_obs = dataset['observations'][t + 1].astype(np.float32)
    act = dataset['actions'][t].astype(np.float32)
    rew = dataset['rewards'][t].astype(np.float32)
    done = bool(dataset['terminals'][t])

    if use_timeouts:
      is_final = dataset['timeouts'][t]
    else:
      is_final = (episode_step == env._max_episode_steps - 1)

    if (not terminate_on_end) and is_final:
      # Skip this transition and don't apply terminals on the last step
      # of an episode.
      episode_step = 0
      continue

    if done or is_final:
      # On the last kept timestep, reuse the current action as next action.
      next_act = act.copy()
      episode_step = 0
    else:
      next_act = dataset['actions'][t + 1].astype(np.float32)

    buffers['observations'].append(obs)
    buffers['next_observations'].append(next_obs)
    buffers['actions'].append(act)
    buffers['rewards'].append(rew)
    buffers['terminals'].append(done)
    buffers['next_actions'].append(next_act)
    episode_step += 1

  result = {key: np.array(values) for key, values in buffers.items()}
  if not include_next_actions:
    del result['next_actions']
  return result
def sequence_dataset(env, dataset=None, **kwargs):
  """
  Returns an iterator through trajectories.
  Args:
      env: An OfflineEnv object.
      dataset: An optional dataset to pass in for processing. If None,
          the dataset will default to env.get_dataset()
      **kwargs: Arguments to pass to env.get_dataset().
  Returns:
      An iterator through dictionaries with keys:
          observations
          actions
          rewards
          terminals
  """
  if dataset is None:
    dataset = env.get_dataset(**kwargs)

  N = dataset['rewards'].shape[0]
  data_ = collections.defaultdict(list)

  # The newer version of the dataset adds an explicit
  # timeouts field. Keep old method for backwards compatability.
  use_timeouts = 'timeouts' in dataset

  episode_step = 0
  for i in range(N):
    done_bool = bool(dataset['terminals'][i])
    if use_timeouts:
      final_timestep = dataset['timeouts'][i]
    else:
      final_timestep = (episode_step == env._max_episode_steps - 1)

    # Append the current step BEFORE checking for episode end, so that a
    # terminal transition belongs to the episode it terminates. (The previous
    # code yielded first, which pushed every terminal step into the *next*
    # episode and could yield an empty first episode — upstream D4RL appends
    # first.)
    for k in dataset:
      data_[k].append(dataset[k][i])

    if done_bool or final_timestep:
      episode_step = 0
      episode_data = {k: np.array(v) for k, v in data_.items()}
      yield episode_data
      data_ = collections.defaultdict(list)

    episode_step += 1
|
"""
"regex_classifier" categorises the label based on keyword occurrences.
- keywords are chosen from the most frequent and symbolic words from each category (doc/feature/bug/other)
- word frequency analysis is done both holistically and individually on each repo
- it turns out that individual repo analysis gives us more insights into words that symbolise the category
- individual repo analysis also prevents a large repo from overshadowing the insights from a smaller repo
- frequent words that are common to many categories are not selected
- stop words and punctuations are not selected
"""
import math
import os
import pickle
import re
import shutil
import pandas as pd
from dotenv import load_dotenv
from sklearn.metrics import precision_recall_fscore_support
from utils import accuracy_labelled
load_dotenv()
ROOT = os.environ.get("ROOT")  # project root directory, configured via .env
# Pickled dataframes produced by an earlier pipeline stage.
LOAD_TRAIN_PATH = f"{ROOT}/pipeline/pickles/dataframe_train.pkl"
LOAD_TEST_PATH = f"{ROOT}/pipeline/pickles/dataframe_test.pkl"
SAVE_DIR = f"{ROOT}/results/regex"  # output directory; wiped and recreated by main()
TRAIN_TEST_SPLIT = 0.8  # fraction of rows considered the training split
FEATURES = ['title', 'body']  # text columns concatenated into the classifier input
def load_pickle(filename):
    """Deserialize and return the object pickled in *filename*."""
    with open(filename, "rb") as handle:
        return pickle.load(handle)
def load_dataframe_from_pickle(path):
    """Read back a pandas DataFrame that was pickled at *path*."""
    return pd.read_pickle(path)
def bug_regex():
    """Return the keyword-alternation regex used to detect the bug class."""
    return ("(version|packages|line|file|model|core|import|source|local|device"
            "|error|build|return|unknown|backtrace|debug|bug|panic|test|what)")
def docs_regex():
    ''' Returns regex to detect doc class. '''
    # FIX: the previous pattern ended with "|)" — a trailing empty alternative
    # that matches at every character position, so every sentence yielded
    # roughly len(sentence)+1 "doc" hits and the doc class dominated
    # compute_regex_class. The empty branch is removed.
    key_words = "(issue|doc|example|version|define|model|guide|use|src|source|need|description|link|changing|api)"
    return key_words
def features_regex():
    """Return the keyword-alternation regex used to detect the feature class."""
    return ("(feature|version|current|using|model|contrib|operation|type"
            "|would|use|unsupported|convert|information|system)")
def other_regex():
    """Return the keyword-alternation regex used to detect the 'other' class."""
    return ("(master|github|version|src|name|use|cluster|node|error|service|pkg"
            "|test|code|default|file|etc|system|type|local|using|true|core"
            "|image|what|run)")
def compute_regex_class(sentence):
    """Classify *sentence* by keyword hits: 0=bug, 1=doc, 2=feature, -1=unsure."""
    patterns = (bug_regex(), docs_regex(), features_regex())
    hits = [len(re.findall(pattern, sentence, re.IGNORECASE)) for pattern in patterns]
    best = max(hits)
    if best == 0:
        return -1  # no keyword matched at all: not confident enough to label
    return hits.index(best)
# NOTE(review): exact duplicate of load_pickle defined earlier in this module;
# this redefinition shadows the first one. Consider removing one copy.
def load_pickle(filename):
    with (open(filename, "rb")) as file:
        data = pickle.load(file)
    return data
def main():
    """Run the regex baseline end to end.

    Loads the pickled train/test dataframes, concatenates the FEATURES text
    columns into one input string per row, labels each row via
    compute_regex_class, computes weighted accuracy/precision/recall/F1 for
    both splits, and writes the metrics plus the regexes used to
    SAVE_DIR/data.txt (SAVE_DIR is wiped first).
    """
    print("Preparing data...")
    # Load data
    train_data = load_dataframe_from_pickle(LOAD_TRAIN_PATH)
    training_length = math.ceil(len(train_data.index) * TRAIN_TEST_SPLIT)
    # Regex matching needs no training, so only the held-out tail is kept.
    train_data = train_data[training_length:]  # No need training set for regex
    test_data = load_dataframe_from_pickle(LOAD_TEST_PATH)
    datasets = [train_data, test_data]

    # Retrieve features: concatenate the configured text columns into 'X'.
    print("Retrieving features...")
    for ds in datasets:
        ds['X'] = ""
        for feature in FEATURES:
            ds['X'] += ds[feature] + " "

    # Regex matching
    print("Matching regex...")
    results = []
    for ds in datasets:
        ds["pred"] = ds['X'].apply(compute_regex_class)
        Y_pred_np = ds["pred"].to_numpy()
        Y_np = ds["labels"].to_numpy()
        acc = accuracy_labelled(Y_pred_np, Y_np)
        precision, recall, fscore, _ = precision_recall_fscore_support(Y_np, Y_pred_np,
                                                                       average="weighted")  # weighted to account for label imbalance
        result = {
            'accuracy': acc,
            'precision': precision,
            'recall': recall,
            'fscore': fscore,
        }
        results.append(result)

    # Saving results and the regexes that produced them.
    print("Saving the good stuff...")
    info = {
        "Results for seen repos": results[0],
        "Results for unseen repos": results[1],
        "Bug regex": bug_regex(),
        "Doc regex": docs_regex(),
        "Feature regex": features_regex(),
    }
    if os.path.exists(SAVE_DIR):
        shutil.rmtree(SAVE_DIR)  # start with clean slate
    os.makedirs(SAVE_DIR)
    data_file = open(f'{SAVE_DIR}/data.txt', "w+")
    data_file.write(str(info))
    data_file.close()
# Script entry point.
if __name__ == "__main__":
    main()
|
from sawyer.ros.envs.sawyer.pick_and_place_env import PickAndPlaceEnv
from sawyer.ros.envs.sawyer.push_env import PushEnv
from sawyer.ros.envs.sawyer.reacher_env import ReacherEnv
from sawyer.ros.envs.sawyer.toy_env import ToyEnv
from sawyer.ros.envs.sawyer.transition_env import TransitionEnv, TransitionPickEnv, TransitionPlaceEnv, TransitionPickAndPlaceEnv
# Public API of this package: the environments re-exported above.
__all__ = ["PickAndPlaceEnv", "PushEnv", "ReacherEnv", "ToyEnv", "TransitionEnv", "TransitionPickEnv", "TransitionPlaceEnv", "TransitionPickAndPlaceEnv"]
|
import os
import abc
import json
import torch
from torch import nn
import utils
class GenericLayer(abc.ABC, nn.Module):
    """Base class for composable layers that track their input/output widths.

    The input width may be given explicitly or inferred from the previous
    layer's get_output_dim(); shape-preserving layers pass samedim=True so
    the output width mirrors the input width.
    """

    def __init__(self, previous_layer=None, input_dim=None, output_dim=None, samedim: bool = False,
                 **kwargs):
        super().__init__()
        self.model_name = 'GenericLayer'
        # Infer the input width from the upstream layer when not given explicitly.
        self.input_dim = previous_layer.get_output_dim() if input_dim is None else input_dim
        if not isinstance(self.input_dim, int):
            raise ValueError(f'input_dim should be int got {type(self.input_dim)} instead')
        # Shape-preserving layers (e.g. dropout, batchnorm) reuse the input width.
        self.output_dim = self.input_dim if samedim else output_dim
        if not isinstance(self.output_dim, int):
            raise ValueError(f'output_dim should be int got {type(self.output_dim)} instead')

    def get_output_dim(self):
        """Width of the feature dimension this layer emits."""
        return self.output_dim
class GRULayer(GenericLayer):
    """Single-layer, optionally bidirectional GRU over batch-first sequences."""

    def __init__(self, previous_layer: nn.Module = None, input_dim: int = None,
                 output_dim: int = None,
                 bidirectional: bool = False):
        super().__init__(previous_layer, input_dim, output_dim)
        self.bidirectional = bidirectional
        self.encoder = nn.GRU(self.input_dim, self.output_dim, num_layers=1,
                              bidirectional=self.bidirectional, batch_first=True)

    def forward(self, x: torch.Tensor, lengths=None, return_hidden: bool = False):
        out, hidden = self.encoder(x)
        if lengths is not None:
            # Pick the output at each sequence's last valid position.
            out = out[torch.arange(out.size(0)), lengths]
        return (out, hidden) if return_hidden else out

    def get_output_dim(self):
        # A bidirectional GRU concatenates forward and backward features.
        return (2 if self.bidirectional else 1) * self.output_dim
class LSTMLayer(GRULayer):
    # Inherits GRULayer's forward() and get_output_dim() but swaps in an
    # LSTM encoder.
    def __init__(self, previous_layer: nn.Module = None, input_dim: int = None,
                 output_dim: int = None,
                 bidirectional: bool = False):
        # Deliberately skips GRULayer.__init__ (which would build a throwaway
        # nn.GRU) and calls GenericLayer.__init__ directly.
        super(GRULayer, self).__init__(previous_layer, input_dim, output_dim)
        self.bidirectional = bidirectional
        self.encoder = nn.LSTM(self.input_dim, self.output_dim, num_layers=1, bidirectional=self.bidirectional,
                               batch_first=True)
class FeedForwardLayer(GenericLayer):
    """Affine projection (nn.Linear) from input_dim to output_dim."""

    def __init__(self, previous_layer: nn.Module = None, input_dim: int = None,
                 output_dim: int = None):
        super().__init__(previous_layer, input_dim, output_dim)
        self.output_dim = output_dim
        self.encoder = nn.Linear(self.input_dim, self.output_dim)

    def forward(self, x: torch.Tensor, lengths=None):
        projected = self.encoder(x)
        if lengths is not None:
            # Keep only the feature vector at each sequence's final position.
            projected = projected[torch.arange(projected.size(0)), lengths]
        return projected
class CNNLayer(GenericLayer):
    """1-D convolution applied over batch-first (batch, time, features) input."""

    def __init__(self, previous_layer: nn.Module = None, input_dim: int = None,
                 output_dim: int = None,
                 kind: str = '1D',
                 stride: int = 1,
                 bias=False,
                 kernel_size: int = 5, dilation: int = 1,
                 padding: float = None):
        super().__init__(previous_layer, input_dim, output_dim)
        self.kind = kind
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.stride = stride
        self.bias = bias
        # Default padding keeps the sequence length unchanged for odd kernels.
        self.padding = int(dilation * (kernel_size - 1) / 2) if padding is None else padding
        if self.kind == '1D':
            conv = nn.Conv1d
        elif self.kind == '2D':
            raise NotImplementedError(f'Conv2D')
        else:
            raise ValueError(f'expected convolutional type `1D`, got {self.kind} instead')
        self.encoder = conv(self.input_dim, self.output_dim,
                            kernel_size=self.kernel_size,
                            dilation=self.dilation,
                            padding=self.padding,
                            stride=self.stride,
                            bias=self.bias)

    def forward(self, x: torch.Tensor, lengths=None):
        # Conv1d expects (batch, channels, time): transpose in, convolve, transpose back.
        out = self.encoder(x.permute(0, 2, 1)).permute(0, 2, 1)
        if lengths is not None:
            out = out[torch.arange(out.size(0)), lengths]
        return out
class EmbeddingLayer(GenericLayer):
    """Token-embedding lookup.

    Here input_dim is the vocabulary size and output_dim the embedding width,
    which is why previous_layer is deliberately not forwarded to the base
    class: the upstream feature width is irrelevant to a vocabulary.
    """

    def __init__(self, previous_layer: nn.Module = None, input_dim: int = None, output_dim: int = None,
                 pad=False, padding_idx=None):
        super().__init__(None, input_dim, output_dim)
        if pad and padding_idx is None:
            # In this case assume the last embedding is used for padding.
            # FIX: padding_idx must index into the vocabulary (0..input_dim-1);
            # the previous code used output_dim (the embedding width), which
            # either raised inside nn.Embedding or zeroed an unrelated row.
            padding_idx = input_dim - 1
        # In theory the line below should be within an `if pad:` block but is left outside for convenience so that
        # if `padding_idx` is set, it will behave as if `pad` is True regardless of the actual value of `pad`
        self.encoder = nn.Embedding(self.input_dim, self.output_dim, padding_idx=padding_idx)

    def forward(self, x: torch.Tensor):
        return self.encoder(x)
class DropoutLayer(GenericLayer):
    """Shape-preserving dropout wrapper (samedim keeps output_dim == input_dim)."""

    def __init__(self, previous_layer: nn.Module = None, input_dim: int = None, p=0.):
        super().__init__(previous_layer, input_dim, samedim=True)
        self.encoder = nn.Dropout(p)

    def forward(self, x: torch.Tensor):
        return self.encoder(x)
class ReLULayer(GenericLayer):
    """Shape-preserving ReLU activation wrapper."""

    def __init__(self, previous_layer: nn.Module = None, input_dim: int = None):
        super().__init__(previous_layer, input_dim, samedim=True)
        self.encoder = nn.ReLU()

    def forward(self, x: torch.Tensor):
        return self.encoder(x)
class BatchNormLayer(GenericLayer):
    """BatchNorm1d over the feature axis of (batch, time, features) input."""

    def __init__(self, previous_layer: nn.Module = None, input_dim: int = None, **kwargs):
        super().__init__(previous_layer, input_dim, samedim=True)
        self.encoder = nn.BatchNorm1d(self.input_dim, **kwargs)

    def forward(self, x: torch.Tensor):
        # BatchNorm1d normalizes dim 1, so move features there and back again.
        return self.encoder(x.permute(0, 2, 1)).permute(0, 2, 1)
class LayerNormLayer(GenericLayer):
    """LayerNorm over the trailing feature dimension; shape-preserving."""

    def __init__(self, previous_layer: nn.Module = None, input_dim: int = None, affine=True, **kwargs):
        super().__init__(previous_layer, input_dim, samedim=True)
        self.encoder = nn.LayerNorm(self.input_dim, elementwise_affine=affine, **kwargs)

    def forward(self, x: torch.Tensor):
        return self.encoder(x)
class SubsamplingLayer(GenericLayer):
    """Temporal decimation by `factor`; optionally keep all phase-shifted streams.

    With concat=False, every factor-th frame is kept. With concat=True, all
    `factor` phase-shifted subsequences are zero-padded to a common length and
    concatenated along the feature axis, multiplying the output width.
    """

    def __init__(self, previous_layer: nn.Module = None, input_dim: int = None, factor=1, concat=False):
        super().__init__(previous_layer, input_dim, samedim=True)
        self.subsampling_factor = factor
        self.concat = concat
        if self.concat:
            self.output_dim = self.subsampling_factor * self.output_dim
            self.encoder = lambda x: [x[:, i::self.subsampling_factor] for i in range(self.subsampling_factor)]
        else:
            self.encoder = lambda x: x[:, ::self.subsampling_factor]

    def forward(self, x: torch.Tensor):
        if not self.concat:
            return self.encoder(x)
        streams = self.encoder(x)
        stream_lens = [s.size(1) for s in streams]
        target_len = max(stream_lens)
        # Zero-pad the shorter phase streams so they can be concatenated.
        padded = [
            torch.cat((s, torch.zeros(streams[0].size(0), target_len - n, streams[0].size(-1),
                                      device=streams[0].device)), dim=1)
            for n, s in zip(stream_lens, streams)
        ]
        return torch.cat(padded, dim=-1)
class UpsamplingLayer(GenericLayer):
    """Temporal upsampling by an integer factor via nn.Upsample."""

    def __init__(self, previous_layer: nn.Module = None, input_dim: int = None,
                 factor: int = 1, mode: str = 'nearest'):
        super().__init__(previous_layer, input_dim, samedim=True)
        self.encoder = nn.Upsample(scale_factor=factor, mode=mode)

    def forward(self, x: torch.Tensor):
        # nn.Upsample scales the trailing dim: put time last, scale, restore.
        return self.encoder(x.transpose(-1, -2)).transpose(-1, -2).contiguous()
class JitterLayer(GenericLayer):
    # Training-time augmentation: with probability p each frame is replaced
    # by a temporal neighbour (t-1 or t+1, each with probability p/2).
    def __init__(self, previous_layer: nn.Module = None, input_dim: int = None, p: float = 0):
        super().__init__(previous_layer, input_dim, samedim=True)
        self.p = p  # probability that a given frame index is jittered

    def forward(self, x: torch.Tensor):
        # Identity in eval mode or when jitter is disabled.
        if not self.training or self.p == 0:
            return x
        else:
            if x.ndim == 2:
                x = x.unsqueeze(0)
            elif x.ndim != 3:
                raise ValueError(f'expected 2D or 3D tensor, got {x.ndim} instead')
            # (batch, time, feat) tensor of frame indices 0..T-1.
            index_tensor = torch.arange(x.size(-2)).unsqueeze(0).unsqueeze(-1).expand_as(x)
            change_prob = torch.rand_like(index_tensor.float())
            # +1 with probability p/2, -1 with probability p/2, 0 otherwise.
            index_change = (change_prob <= self.p).int()
            index_change = torch.where(change_prob > (self.p / 2), index_change, -index_change)  # t - 1
            index_tensor = index_tensor + index_change
            # Clamp so jittered indices stay inside the sequence.
            index_tensor.clamp_min_(0)
            index_tensor.clamp_max_(x.size(-2) - 1)
            # NOTE(review): .squeeze() drops *all* singleton dims, so a batch of
            # size 1 also loses its batch axis here — confirm this is intended.
            x = torch.gather(x, -2, index_tensor.to(x.device)).squeeze()
            return x
class CompositeModel(nn.Module):
    """Sequential stack of layers built from a JSON-style description.

    Args:
        layers_dict: mapping key -> layer kwargs (each carrying 'layer_name'),
            or a path to a JSON file holding {'layers': ..., 'ordering': ...}.
        ordering: sequence of keys giving the layer order; defaults to the
            numerically sorted keys of layers_dict. Keys may repeat, building
            a fresh layer for every occurrence.
        input_dim: if given, overrides the first layer's input_dim.
            NOTE: this mutates the caller's layers_dict entry in place.
    """

    def __init__(self, layers_dict, ordering=None, input_dim=None):
        super().__init__()
        self.model_name = 'CompositeModel'
        if isinstance(layers_dict, str):
            # A string argument is a path to a saved nnet.json description.
            with open(layers_dict) as _json:
                layers_dict = json.load(_json)
            ordering = layers_dict['ordering']
            layers_dict = layers_dict['layers']
        self.layers_dict = layers_dict
        if ordering is None:
            ordering = sorted([int(x) for x in layers_dict.keys()])
        self.ordering = [str(x) for x in ordering]
        if input_dim is not None:
            self.layers_dict[self.ordering[0]]['input_dim'] = input_dim
        # Build layers in order, wiring each one's input to its predecessor.
        layers = [make_layer(self.layers_dict[self.ordering[0]])]
        self.input_dim = layers[0].input_dim
        for key in self.ordering[1:]:
            layer = make_layer(self.layers_dict[key], previous_layer=layers[-1])
            layers.append(layer)
        self.output_dim = layers[-1].output_dim
        self.layers = nn.ModuleList(layers)
        print(self.layers)

    def forward_at_t(self, x, lengths=None):
        """Forward pass returning the output at index `lengths` per sequence
        (defaults to the last frame of each sequence)."""
        if lengths is None:
            lengths = [x.size(1)-1 for _ in x]
        return self(x, lengths)

    def summarize(self, input_sequence, mask=None, dim=1, **kwargs):
        """Masked sum over `dim`, projected through self.output_layer.

        NOTE(review): self.output_layer is not defined anywhere in this class;
        callers or subclasses must attach it before using summarize — confirm.
        """
        x = self(input_sequence, **kwargs)
        if mask is not None:
            if mask.ndim < x.ndim:
                mask = mask.unsqueeze(-1).expand(*x.shape)
            x = x * mask
        return self.output_layer(x.sum(dim=dim))

    def forward(self, x, lengths=None, outputs_at_layer=None):
        """Run the stack; optionally collect intermediate outputs at the given
        layer indices and/or index the final output at `lengths`."""
        intermediates = []
        for i, layer in enumerate(self.layers):
            x = layer(x)
            if outputs_at_layer is not None and i in outputs_at_layer:
                intermediates.append(x)
        if lengths is not None:
            x = x[torch.arange(x.size(0)), lengths]
        if outputs_at_layer is not None:
            return x, intermediates
        return x

    def save(self, outdir):
        """Persist the layer description (nnet.json), model kind, and weights
        (nnet.mdl) to `outdir`."""
        utils.chk_mkdir(outdir)
        with open(os.path.join(outdir, 'nnet.json'), 'w') as _json:
            savedict = {'layers': self.layers_dict,
                        'ordering': self.ordering}
            json.dump(savedict, _json)
        if hasattr(self, 'model_name'):
            with open(os.path.join(outdir, 'nnet_kind.txt'), 'w') as _kind:
                _kind.write(self.model_name)
        torch.save(self.state_dict(), os.path.join(outdir, 'nnet.mdl'))

    @classmethod
    def load_from_dir(cls, nnetdir, map_location=None):
        """Rebuild a model from a directory written by save()."""
        net = cls(os.path.join(nnetdir, 'nnet.json'))
        state_dict = torch.load(os.path.join(nnetdir, 'nnet.mdl'),
                                map_location=map_location)
        # FIX: the previous code called net.to(map_location) twice in a row;
        # a single move is sufficient.
        net.to(map_location)
        net.load_state_dict(state_dict)
        return net
# Registry mapping layer-name strings (long names plus short aliases) to layer
# classes; consulted by make_layer when building models from descriptions.
_layers = {'GRULayer': GRULayer,
           'GRU': GRULayer,
           'LSTMLayer': LSTMLayer,
           'LSTM': LSTMLayer,
           'FeedForwardLayer': FeedForwardLayer,
           'FF': FeedForwardLayer,
           'CNNLayer': CNNLayer,
           'CNN': CNNLayer,
           'Dropout': DropoutLayer,
           'Drop': DropoutLayer,
           'BatchNormLayer': BatchNormLayer,
           'BatchNorm': BatchNormLayer,
           'LayerNormLayer': LayerNormLayer,
           'LayerNorm': LayerNormLayer,
           'ReLU': ReLULayer,
           'SubsamplingLayer': SubsamplingLayer,
           'Subs': SubsamplingLayer,
           'UpsamplingLayer': UpsamplingLayer,
           'Ups': UpsamplingLayer,
           'EmbeddingLayer': EmbeddingLayer,
           'Embedding': EmbeddingLayer,
           'JitterLayer': JitterLayer,
           'Jitter': JitterLayer,
           }
def make_layer(layer_dict: dict, previous_layer=None, **kwargs) -> GenericLayer:
    """Instantiate the layer described by `layer_dict` (must carry 'layer_name')."""
    spec = dict(layer_dict)  # copy so the caller's description is not mutated
    layer_cls = _layers[spec.pop('layer_name')]
    return layer_cls(previous_layer=previous_layer, **spec, **kwargs)
def test_block():
    """Smoke test: build, run, save, reload, and backprop through CompositeModel."""
    x0 = torch.randn(32, 100, 42)  # (batch, time, features) acoustic-like input
    q0 = torch.randint(26, (32, 10))  # (batch, time) integer token ids
    # Layer specs are keyed arbitrarily; 'ordering' selects (and may repeat) them.
    desc = {"0": {'layer_name': 'CNN', 'input_dim': 42, 'output_dim': 256},
            "6": {'layer_name': 'BatchNormLayer'},
            "9": {'layer_name': 'ReLU'},
            "4": {'layer_name': 'Drop', 'p': 0.5},
            "1": {'layer_name': 'GRU', 'output_dim': 256, 'bidirectional': True},
            "7": {'layer_name': 'BatchNormLayer'},
            "8": {'layer_name': 'Drop', 'p': 0.5},
            "3": {'layer_name': 'FF', 'output_dim': 20},
            "10": {'layer_name': 'SubsamplingLayer', 'factor': 2},
            "11": {'layer_name': 'SubsamplingLayer', 'factor': 3, 'concat': True},
            "12": {'layer_name': 'Embedding', 'input_dim': 26, 'output_dim': 32},
            "13": {'layer_name': 'CNN', 'output_dim': 256, 'kernel_size': 12},
            "14": {'layer_name': 'JitterLayer', 'p': 0.4},
            }
    model3 = CompositeModel(desc, ordering=['13', '10', '6', '9', '4', '1', '10', '7', '8', '3', '11',
                                            '14'], input_dim=42)
    qmod = CompositeModel(desc, ordering=['12', '1', '3'], input_dim=26)
    # model3.eval()
    y0 = model3(x0)
    # Round-trip through save/load and compare outputs.
    model3.save('tmp/thisdir')
    model4 = CompositeModel.load_from_dir('tmp/thisdir')
    # model4.eval()
    y1 = model4(x0)
    qrep = qmod.forward_at_t(q0)
    print(x0.size())
    print(y1.size())
    print(qrep.size())
    print(abs((y0 - y1)).sum())
    inds = torch.randint(32, (32,))
    # Account for the total temporal subsampling factor when indexing frames.
    subs = 1
    for layer in model4.layers:
        try:
            subs *= layer.subsampling_factor
        except AttributeError:
            continue
    inds = inds // subs
    print(model4.forward_at_t(x0, inds).size())
    print(model4.input_dim)
    opt = torch.optim.Adam([{'params': model3.parameters()},
                            {'params': model4.parameters()}],
                           lr=1e-3)
    loss = y0.sum() + y1.sum()
    loss.backward()
    # Collect intermediate activations from the two subsampling layers.
    x, inter = model3(x0, outputs_at_layer=[10, 11])
    print((inter[0] == inter[1]).sum().float()/(x == x).sum().float())
    x.sum().backward()
    # print(model3.layers[0].encoder.weight.grad)
    # opt.param_groups[0].zero_grad()
    # print(model3.layers[0].encoder.weight.grad)
# Run the smoke test when this module is executed directly.
if __name__ == '__main__':
    test_block()
|
class Solution:
    """LeetCode 258 "Add Digits": reduce a number to its digital root."""

    def addDigits(self, num):
        """Repeatedly sum the decimal digits of *num* until one digit remains."""
        while num >= 10:
            num = sum(map(int, str(num)))
        return num
|
#!/usr/bin/env python
"""
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
import pyglet
from pyglet.window.xlib import xlib
import lib_xrandr as xrandr
def _check_extension(display):
    """Raise if the X server does not expose the RANDR extension."""
    opcode = ctypes.c_int()
    event = ctypes.c_int()
    error = ctypes.c_int()
    xlib.XQueryExtension(display._display, 'RANDR',
                         ctypes.byref(opcode),
                         ctypes.byref(event),
                         ctypes.byref(error))
    # A zero opcode means the server did not recognize the extension name.
    if not opcode.value:
        raise Exception('RANDR extension not available')
def _check_version(display):
    """Require RandR >= 1.2 and return the server version as 'major.minor'.

    Raises:
        Exception: if the server's RandR version is older than 1.2.
    """
    major = ctypes.c_int()
    minor = ctypes.c_int()
    xrandr.XRRQueryVersion(display._display,
                           ctypes.byref(major), ctypes.byref(minor))
    # FIX: compare (major, minor) lexicographically. The previous test
    # `major < 1 or minor < 2` rejected any version with minor < 2 regardless
    # of major — e.g. a hypothetical RandR 2.0 would have failed.
    if (major.value, minor.value) < (1, 2):
        raise Exception('Server does not support RandR 1.2')
    return '%d.%d' % (major.value, minor.value)
# --- Script body: query RandR state, then switch CRTC 0's mode. ---
display = pyglet.window.get_platform().get_default_display()
_check_extension(display)
_check_version(display)
_display = display._display
# Collect the distinct root windows across all X screens.
root_windows = set()
for screen in display.get_screens():
    x_screen = xlib.XScreenOfDisplay(_display, screen._x_screen_id)
    root_window = xlib.XRootWindowOfScreen(x_screen)
    root_windows.add(root_window)
for root_window in root_windows:
    resources_p = xrandr.XRRGetScreenResources(_display, root_window)
    resources = resources_p.contents
    # Dump the geometry of every CRTC on this root window.
    print('CRTCs:')
    for i in range(resources.ncrtc):
        info = xrandr.XRRGetCrtcInfo(_display, resources_p, resources.crtcs[i])
        info = info.contents
        print(' %dx%d @ %d,%d' % (info.width, info.height, info.x, info.y))
    # Dump every available mode.
    print('Modes:')
    for i in range(resources.nmode):
        info = resources.modes[i]
        print(' (%d) %dx%d "%s"' % (info.id,
                                    info.width, info.height, info.name))
    # Set CRTC 0 to mode 1 without changing outputs
    # NOTE(review): the comment above says "mode 1" but resources.modes[0]
    # is passed below — confirm which mode index is intended.
    info = xrandr.XRRGetCrtcInfo(_display, resources_p, resources.crtcs[0])
    info = info.contents
    xrandr.XRRSetCrtcConfig(_display, resources_p, resources.crtcs[0],
                            info.timestamp, info.x, info.y, resources.modes[0].id,
                            info.rotation, info.outputs, info.noutput)
|
import logging
from typing import Any, Dict, List
import numpy as np
from overrides import overrides
import torch
import torch.nn.functional as F
from torch.nn.functional import nll_loss
from torch.nn.functional import cross_entropy
from torch.nn import CrossEntropyLoss
from pytorch_pretrained_bert.modeling import BertModel
import os
import random
import traceback
import json
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.nn import InitializerApplicator, util
from allennlp.tools import squad_eval
from allennlp.training.metrics import Average, BooleanAccuracy, CategoricalAccuracy
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
@Model.register("multiqa_bert")
class MultiQA_BERT(Model):
    """BERT-based reading-comprehension model for MultiQA.

    Predicts an answer span (start/end logits over word pieces) via a linear
    head on the BERT sequence output, and an optional yes/no answer via a
    linear head on the pooled [CLS] output. Reports official SQuAD-style
    EM and F1 as metrics.
    """

    def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 initializer: InitializerApplicator,
                 max_span_length: int = 30,
                 use_multi_label_loss: bool = False,
                 stats_report_freq:float = None,
                 debug_experiment_name:str = None) -> None:
        super().__init__(vocab)
        self._max_span_length = max_span_length  # longest span considered at decode time
        self._text_field_embedder = text_field_embedder
        self._stats_report_freq = stats_report_freq
        self._debug_experiment_name = debug_experiment_name
        self._use_multi_label_loss = use_multi_label_loss

        # see usage below for explanation
        # Linear head producing 2 logits per word piece: span start / span end.
        self.qa_outputs = torch.nn.Linear(self._text_field_embedder.get_output_dim(), 2)
        # Classification head over the yes/no label vocabulary (fed pooled [CLS]).
        self.qa_yesno = torch.nn.Linear(self._text_field_embedder.get_output_dim(), \
            self.vocab.get_vocab_size("yesno_labels"))

        self._dropout = torch.nn.Dropout(self._text_field_embedder.token_embedder_bert.bert_model.config.hidden_dropout_prob)
        initializer(self)
        self._official_f1 = Average()
        self._official_EM = Average()

    def bert_offsets_to_wordpiece_offsets(self,bert_offsets):
        """Expand token-level BERT offsets into a per-word-piece token index list."""
        # first offset is [CLS]
        wordpiece_offsets = [0]
        last_offset = 0
        for idx, offset in enumerate(bert_offsets):
            # every word piece between the previous and current offset maps to token idx
            wordpiece_offsets += [idx for i in range(last_offset,offset)]
            last_offset = offset
        return wordpiece_offsets

    def forward(self,  # type: ignore
                question: Dict[str, torch.LongTensor],
                passage: Dict[str, torch.LongTensor],
                span_starts: torch.IntTensor = None,
                span_ends: torch.IntTensor = None,
                yesno_labels : torch.IntTensor = None,
                metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
        """Compute span (and optional yes/no) predictions, loss, and metrics.

        NOTE(review): span_starts/span_ends are clamped and converted below
        without a None-guard; in pure prediction mode (no gold spans) this
        would raise — confirm callers always supply them.
        """
        batch_size, num_of_passage_tokens = passage['bert'].size()

        # Executing the BERT model on the word piece ids (input_ids)
        input_ids = passage['bert']
        token_type_ids = torch.zeros_like(input_ids)
        mask = (input_ids != 0).long()  # word pieces are non-zero; 0 is padding
        embedded_chunk, pooled_output = \
            self._text_field_embedder.token_embedder_bert.bert_model(input_ids=util.combine_initial_dims(input_ids),
                                                                     token_type_ids=util.combine_initial_dims(token_type_ids),
                                                                     attention_mask=util.combine_initial_dims(mask),
                                                                     output_all_encoded_layers=False)

        # Just measuring some lengths and offsets to handle the conversion between tokens and word-pieces
        passage_length = embedded_chunk.size(1)
        mask_min_values, wordpiece_passage_lens = torch.min(mask, dim=1)
        wordpiece_passage_lens[mask_min_values == 1] = mask.shape[1]
        offset_min_values, token_passage_lens = torch.min(passage['bert-offsets'], dim=1)
        token_passage_lens[offset_min_values != 0] = passage['bert-offsets'].shape[1]
        bert_offsets = passage['bert-offsets'].cpu().numpy()

        # BERT for QA is a fully connected linear layer on top of BERT producing 2 vectors of
        # start and end spans.
        logits = self.qa_outputs(embedded_chunk)
        start_logits, end_logits = logits.split(1, dim=-1)
        span_start_logits = start_logits.squeeze(-1)
        span_end_logits = end_logits.squeeze(-1)

        # all input is preprocessed before forward is run, counting the yesno vocabulary
        # will indicate if yesno support is at all needed.
        if self.vocab.get_vocab_size("yesno_labels") > 1:
            #pooled_output = self._dropout(pooled_output)
            yesno_logits = self.qa_yesno(pooled_output)
            #label_logits_flat = label_logits.squeeze(1)
            #label_logits = label_logits.view(-1, num_choices)
            #yesno_logits = self.qa_yesno(torch.max(embedded_chunk, 1)[0])

        span_starts.clamp_(0, passage_length)
        span_ends.clamp_(0, passage_length)

        # moving to word piece indexes from token indexes of start and end span
        span_starts_list = [bert_offsets[i, span_starts[i]] if span_starts[i] != 0 else 0 for i in range(batch_size)]
        span_ends_list = [bert_offsets[i, span_ends[i]] if span_ends[i] != 0 else 0 for i in range(batch_size)]
        span_starts = torch.cuda.LongTensor(span_starts_list, device=span_end_logits.device) \
            if torch.cuda.is_available() else torch.LongTensor(span_starts_list)
        span_ends = torch.cuda.LongTensor(span_ends_list, device=span_end_logits.device) \
            if torch.cuda.is_available() else torch.LongTensor(span_ends_list)

        # ignore_index=passage_length: out-of-range gold spans contribute no loss
        loss_fct = CrossEntropyLoss(ignore_index=passage_length)
        start_loss = loss_fct(start_logits.squeeze(-1), span_starts)
        end_loss = loss_fct(end_logits.squeeze(-1), span_ends)

        if self.vocab.get_vocab_size("yesno_labels") > 1 and yesno_labels is not None:
            yesno_loss = loss_fct(yesno_logits, yesno_labels)
            loss = (start_loss + end_loss + yesno_loss) / 3
        else:
            loss = (start_loss + end_loss) / 2

        output_dict: Dict[str, Any] = {}
        if loss == 0:
            # For evaluation purposes only!
            output_dict["loss"] = torch.cuda.FloatTensor([0], device=span_end_logits.device) \
                if torch.cuda.is_available() else torch.FloatTensor([0])
        else:
            output_dict["loss"] = loss

        # Compute F1 and preparing the output dictionary.
        output_dict['best_span_str'] = []
        output_dict['best_span_logit'] = []
        output_dict['yesno'] = []
        output_dict['yesno_logit'] = []
        output_dict['qid'] = []
        if span_starts is not None:
            output_dict['EM'] = []
            output_dict['f1'] = []

        # getting best span prediction for
        best_span = self._get_example_predications(span_start_logits, span_end_logits, self._max_span_length)
        best_span_cpu = best_span.detach().cpu().numpy()

        for instance_ind, instance_metadata in zip(range(batch_size), metadata):
            best_span_logit = span_start_logits.data.cpu().numpy()[instance_ind, best_span_cpu[instance_ind][0]] + \
                span_end_logits.data.cpu().numpy()[instance_ind, best_span_cpu[instance_ind][1]]

            if self.vocab.get_vocab_size("yesno_labels") > 1:
                yesno_maxind = np.argmax(yesno_logits[instance_ind].data.cpu().numpy())
                yesno_logit = yesno_logits[instance_ind, yesno_maxind].data.cpu().numpy()
                yesno_pred = self.vocab.get_token_from_index(yesno_maxind, namespace="yesno_labels")
            else:
                yesno_pred = 'no_yesno'
                yesno_logit = -30.0

            passage_str = instance_metadata['original_passage']
            offsets = instance_metadata['token_offsets']
            predicted_span = best_span_cpu[instance_ind]

            # In this version yesno if not "no_yesno" will be regarded as final answer before the spans are considered.
            if yesno_pred != 'no_yesno':
                best_span_string = yesno_pred
            else:
                # span (0, 0) is the model's "cannot answer" sentinel
                if predicted_span[0] == 0 and predicted_span[1] == 0:
                    best_span_string = 'cannot_answer'
                else:
                    # Map the predicted word-piece span back to character offsets.
                    wordpiece_offsets = self.bert_offsets_to_wordpiece_offsets(bert_offsets[instance_ind][0:len(offsets)])
                    start_offset = offsets[wordpiece_offsets[predicted_span[0] if predicted_span[0] < len(wordpiece_offsets) \
                        else len(wordpiece_offsets)-1]][0]
                    end_offset = offsets[wordpiece_offsets[predicted_span[1] if predicted_span[1] < len(wordpiece_offsets) \
                        else len(wordpiece_offsets)-1]][1]
                    best_span_string = passage_str[start_offset:end_offset]

            output_dict['best_span_str'].append(best_span_string)
            output_dict['best_span_logit'].append(best_span_logit)
            output_dict['yesno'].append(yesno_pred)
            output_dict['yesno_logit'].append(yesno_logit)
            output_dict['qid'].append(instance_metadata['question_id'])

            # In AllenNLP prediction mode we have no gold answers, so let's check
            if span_starts is not None:
                yesno_label_ind = yesno_labels.data.cpu().numpy()[instance_ind]
                yesno_label = self.vocab.get_token_from_index(yesno_label_ind, namespace="yesno_labels")

                if yesno_label != 'no_yesno':
                    gold_answer_texts = [yesno_label]
                elif instance_metadata['cannot_answer']:
                    gold_answer_texts = ['cannot_answer']
                else:
                    gold_answer_texts = instance_metadata['answer_texts_list']

                f1_score = squad_eval.metric_max_over_ground_truths(squad_eval.f1_score, best_span_string, gold_answer_texts)
                EM_score = squad_eval.metric_max_over_ground_truths(squad_eval.exact_match_score, best_span_string, gold_answer_texts)
                self._official_f1(100 * f1_score)
                self._official_EM(100 * EM_score)
                output_dict['EM'].append(100 * EM_score)
                output_dict['f1'].append(100 * f1_score)

        return output_dict

    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        """Return the running official EM and F1 averages."""
        return {'EM': self._official_EM.get_metric(reset),
                'f1': self._official_f1.get_metric(reset)}

    @staticmethod
    def _get_example_predications(span_start_logits: torch.Tensor,
                                  span_end_logits: torch.Tensor,
                                  max_span_length: int) -> torch.Tensor:
        # Returns the index of highest-scoring span that is not longer than 30 tokens, as well as
        # yesno prediction bit and followup prediction bit from the predicted span end token.
        if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
            raise ValueError("Input shapes must be (batch_size, passage_length)")
        batch_size, passage_length = span_start_logits.size()
        max_span_log_prob = [-1e20] * batch_size
        span_start_argmax = [0] * batch_size
        best_word_span = span_start_logits.new_zeros((batch_size, 4), dtype=torch.long)

        span_start_logits = span_start_logits.data.cpu().numpy()
        span_end_logits = span_end_logits.data.cpu().numpy()
        for b_i in range(batch_size):  # pylint: disable=invalid-name
            for j in range(passage_length):
                # track the best start seen so far, then pair it with end j
                val1 = span_start_logits[b_i, span_start_argmax[b_i]]
                if val1 < span_start_logits[b_i, j]:
                    span_start_argmax[b_i] = j
                    val1 = span_start_logits[b_i, j]
                val2 = span_end_logits[b_i, j]
                if val1 + val2 > max_span_log_prob[b_i]:
                    if j - span_start_argmax[b_i] > max_span_length:
                        continue
                    best_word_span[b_i, 0] = span_start_argmax[b_i]
                    best_word_span[b_i, 1] = j
                    max_span_log_prob[b_i] = val1 + val2
        # NOTE(review): the loop below only assigns `j` and is otherwise a
        # no-op — it looks like leftover code; confirm it can be removed.
        for b_i in range(batch_size):
            j = best_word_span[b_i, 1]
        return best_word_span
import csv_utilities
from collections import namedtuple
class Data(object):
    """Tabular dataset whose last column is the class label.

    Rows are stored as ``Row(X, Y)`` namedtuples where ``X`` is the feature
    vector (all columns but the last) and ``Y`` is the label (last column).
    """

    def __init__(self, filename, reader_fun, converter_fun=lambda x: x):
        """Load a dataset.

        :param filename: path handed to *reader_fun*.
        :param reader_fun: callable returning (header_row, data_rows).
        :param converter_fun: optional post-processing of the reader output.
        """
        Row = namedtuple('Row', ['X', 'Y'])
        reader_data = converter_fun(reader_fun(filename))
        self.attributes = reader_data[0][:-1]  # drop the trailing 'class' attribute
        # Named tuple Row( X: row_0..row_n-1, Y: row_n )
        self.rows = [Row(row[:-1], row[-1]) for row in reader_data[1]]
        self.attributeCount = len(self.attributes)

    def getRowsTuples(self):
        """Return the rows as plain (features, label) tuples.

        BUG FIX: rows are 2-field Row(X, Y) namedtuples, so the original
        ``(row[:-1], row[-1])`` produced ``((X,), Y)`` -- the feature vector
        wrapped in an extra 1-tuple -- instead of ``(X, Y)``.
        """
        return [(row.X, row.Y) for row in self.rows]

    def getTrueResults(self):
        """Return the label column as a list."""
        return [row.Y for row in self.rows]

    def compareTrueResultsWith(self, results):
        """Return percent accuracy of *results* against the stored labels.

        :param results: predicted labels, one per row.
        :return: accuracy in [0, 100], or None on a length mismatch.
        """
        rows_count = len(results)
        if rows_count != len(self.rows):
            print("Error while comparing results.")
            return None
        correct_rows_count = sum(
            1 for predicted, row in zip(results, self.rows) if predicted == row.Y)
        return (correct_rows_count / rows_count) * 100
|
from numbers import Real
class Site:
    """A single spin site of the system.

    :param index: The index of each site of the system
    :type index: int
    :param position: The position of each site of the system
    :type position: float
    :param type: The type of each site in the system
    :type type: str
    :param mu: The spin norms of each site of the system.
    :type mu: float
    :param anisotropy_constant: The anisotropy constant of the system
    :type anisotropy_constant: float
    :param anisotropy_axis: The anisotropy axis of the system
        (stored as ``anisotopy_axis`` -- the historical misspelling is kept
        for backward compatibility with existing callers).
    :type anisotropy_axis: float
    :param field_axis: The field axis of the system
    :type field_axis: float
    :param neighbors: The list of neighbors of the sites in the system.
    :type neighbors: list
    :param jexs: The list of the exchanges interactions of the sites in the system.
    :type jexs: list
    """

    def __init__(
        self,
        index,
        position,
        type_,
        mu,
        anisotropy_constant,
        anisotopy_axis,
        field_axis,
    ):
        """Build a Site.

        :param index: index of the site within the system.
        :param position: position of the site.
        :param type_: type label of the site.
        :param mu: spin norm of the site.
        :param anisotropy_constant: anisotropy constant.
        :param anisotopy_axis: anisotropy axis (parameter name preserved,
            typo included, so keyword callers keep working).
        :param field_axis: field axis.
        """
        self.index = index
        self.position = position
        self.type = type_
        self.mu = mu
        self.anisotropy_constant = anisotropy_constant
        self.anisotopy_axis = anisotopy_axis
        self.field_axis = field_axis
        self.__neighbors = []
        self.__jexs = []

    @classmethod
    def from_dict(cls, site_dict):
        """Alternate constructor from a dictionary.

        :param site_dict: dict with keys index, position, type, mu,
            anisotropy_constant, anisotopy_axis and field_axis.
        :type site_dict: dict
        :return: a new Site built from those values.
        :rtype: Site
        """
        return cls(
            site_dict["index"],
            site_dict["position"],
            site_dict["type"],
            site_dict["mu"],
            site_dict["anisotropy_constant"],
            site_dict["anisotopy_axis"],
            site_dict["field_axis"],
        )

    def append_neighbor(self, neighbor, jex: float):
        """Register one neighbor together with its exchange interaction.

        :param neighbor: neighboring site.
        :type neighbor: Site
        :param jex: exchange interaction with that neighbor.
        :type jex: float
        :raises TypeError: if *neighbor* is not a Site or *jex* is not a real
            number.  (TypeError subclasses Exception, so callers catching the
            previous bare ``Exception`` still work.)
        """
        if not isinstance(neighbor, Site):
            raise TypeError("`neighbor` is not an instance of Site.")
        if not isinstance(jex, Real):
            raise TypeError("`jex` is not an instance of Real.")
        self.__neighbors.append(neighbor)
        self.__jexs.append(jex)

    def set_neighbors(self, neighbors: list, jexs: list):
        """Register many neighbors at once, pairing neighbors[i] with jexs[i].

        :param neighbors: list of neighboring sites.
        :type neighbors: list
        :param jexs: list of exchange interactions.
        :type jexs: list
        """
        for neighbor, jex in zip(neighbors, jexs):
            self.append_neighbor(neighbor, jex)

    @property
    def neighbors(self):
        """list: the neighbors registered so far (read-only view)."""
        return self.__neighbors

    @property
    def jexs(self):
        """list: the exchange interactions registered so far (read-only view)."""
        return self.__jexs

    def __eq__(self, other_site):
        """Two sites are equal iff they share the same index."""
        return self.index == other_site.index

    def __hash__(self):
        # BUG FIX: defining __eq__ alone implicitly set __hash__ to None,
        # making Site unusable in sets / as dict keys.  Hash by the same key
        # that __eq__ compares, keeping the hash/eq contract consistent.
        return hash(self.index)

    def __repr__(self):
        return f"Site(index={self.index!r}, type={self.type!r})"
|
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0-only
#
# wad-shuffle-dir - Shuffle lumps in WAD files
#
# Shuffle lumps of a given type in order to produce a randomized WAD. This
# more of a novelty than a useful tool.
from __future__ import print_function
# Imports
import argparse
import atexit
import os
import random
import shutil
import subprocess
import sys
import tempfile
# Globals
args = {} # Command line arguments.
allowed_lumps = ( # Allowed lump types.
"flats",
"graphics",
"musics",
"patches",
"sounds",
"sprites")
iwad_dir = "" # Directory that the IWAD is in.
name = "" # Basename of this script without the ".py".
temp_dir = "" # Temporary directory to extract to.
# Functions
# Cleanup temporary files.
def cleanup():
    """Remove the temporary extraction directory, unless -k/--keep was given."""
    message = 'temporary directory "' + temp_dir + '".'
    if args.keep:
        verbose("Keeping " + message)
        return
    verbose("Removing " + message)
    shutil.rmtree(temp_dir)
# Write a fatal error message to stderr and exit.
def fatal(msg):
    """Write *msg* to stderr, flush, and terminate with exit status 1."""
    sys.stderr.write(msg + "\n")
    sys.stderr.flush()
    sys.exit(1)
# Initialize variables.
def init():
    """Validate inputs and prepare directories -- must run after parse_args().

    Side effects: sets the module globals iwad_dir, name and temp_dir,
    creates args.out_dir and a fresh temporary directory, registers
    cleanup() with atexit, seeds the RNG, normalizes args.lumps, and may
    terminate the process via fatal().
    """
    global iwad_dir
    global name
    global temp_dir
    # Make sure the IWAD exists.
    if not os.path.isfile(args.iwad):
        fatal("IWAD \"" + args.iwad + "\" does not exist.")
    iwad_dir = os.path.dirname(args.iwad)
    verbose("IWAD file : " + args.iwad)
    verbose("IWAD directory : " + iwad_dir)
    # Make sure that the output directory can be created.
    if os.path.isdir(args.out_dir):
        if not args.force:
            fatal("Output directory \"" + args.out_dir + "\" already exists, but "
                  + "-f, --force was not specified.")
    else:
        os.makedirs(args.out_dir)
    verbose("Output directory : " + args.out_dir)
    # Create a temporary directory named after this script.
    name = os.path.basename(sys.argv[0]).replace(".py", "")
    temp_dir = tempfile.mkdtemp(prefix=name + "-")
    if not "win" in sys.platform.lower():
        # For case sensitive operating systems (not Windows) convert to a
        # lower case temp_dir to avoid a bug in deutex.
        temp_dir_lower = temp_dir.lower()
        if temp_dir != temp_dir_lower:
            # Recreate the directory under the lower-cased name; 0700 keeps
            # it private, matching mkdtemp's behavior.
            os.mkdir(temp_dir_lower, 0o700)
            os.rmdir(temp_dir)
            temp_dir = temp_dir_lower
    verbose("Temporary directory : " + temp_dir)
    if args.seed:
        random.seed(args.seed)
    # Make sure to clean up the above directories.
    atexit.register(cleanup)
    # Special value "all" means all allowed lumps.
    if len(args.lumps) == 1 and args.lumps[0] == "all":
        args.lumps = allowed_lumps
    # args.lumps should now be a subset of allowed_lumps.
    for lump in args.lumps:
        if lump not in allowed_lumps:
            fatal("Lump \"" + lump + "\" is not allowed. Allowed lump types: "
                  + str(allowed_lumps))
    # -i/--invert: select every allowed lump type NOT given on the command line.
    if args.invert:
        lumps_new = ()
        for lump in allowed_lumps:
            if lump not in args.lumps:
                lumps_new += (lump,)
        args.lumps = lumps_new
# Parse the command line arguments.
def parse_args():
    """Define the command line interface and parse sys.argv into the module
    global `args`.

    Kept byte-identical: the registration order of the options determines the
    generated --help text and the order of the positional arguments.
    """
    global args
    parser = argparse.ArgumentParser(
        description="Shuffle lumps in Doom IWADs and write to a directory.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # The following is sorted by long argument.
    parser.add_argument("-d", "--deutex-path", type=str, default="deutex",
                        help="Path to \"deutex\".")
    parser.add_argument("-f", "--force", action="store_true",
                        help="Force. Write to OUT-DIR even if it exists.")
    parser.add_argument("-i", "--invert", action="store_true",
                        help="Invert the lump types specified.")
    parser.add_argument("-k", "--keep", action="store_true",
                        help="Keep the temporary directory.")
    parser.add_argument("-s", "--seed", type=str,
                        help="Seed for the random number generator.")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Verbose output.")
    parser.add_argument("iwad", metavar="IWAD",
                        help="IWAD file.")
    parser.add_argument("out_dir", metavar="OUT-DIR",
                        help="Output directory.")
    parser.add_argument("lumps", metavar="LUMP", nargs="*", default=["sprites"],
                        help="Lump types to select.")
    args = parser.parse_args()
# Run "deutex" to extract the lumps.
def run_deutex():
    """Invoke deutex to extract the selected lump types into temp_dir."""
    iwad_arg = "-doom2" if args.iwad.lower().endswith("2.wad") else "-doom"
    cmd = [args.deutex_path, iwad_arg, iwad_dir, "-dir", temp_dir]
    for lump in args.lumps:
        cmd.append("-" + lump)
    cmd.extend(["-x", args.iwad])
    verbose("Running: " + " ".join(cmd))
    # Suppress deutex's stdout unless -v/--verbose was given.
    sink = None if args.verbose else open(os.devnull, "w")
    rc = subprocess.call(cmd, stdout=sink)
    if rc:
        fatal("Unable to run deutex.")
# Process a lump by copying randomly to the output directory.
def process_lump(lump):
    """Shuffle the extracted files of one lump type into the output dir.

    Files are moved from temp_dir/<lump> to args.out_dir/<lump> with their
    names randomly permuted, which is what scrambles the WAD contents.
    """
    verbose('Processing lump "' + lump + '".')
    src = temp_dir + "/" + lump
    if not os.path.isdir(src):
        fatal('Source directory "' + src + '" is missing.')
    dst = args.out_dir + "/" + lump
    if not os.path.exists(dst):
        os.mkdir(dst)
    original_names = os.listdir(src)
    shuffled_names = original_names[:]
    random.shuffle(shuffled_names)
    for src_name, dst_name in zip(original_names, shuffled_names):
        shutil.move(src + "/" + src_name, dst + "/" + dst_name)
# Process all of the lump types.
def process_lumps():
    """Run process_lump for every selected lump type."""
    for lump_type in args.lumps:
        process_lump(lump_type)
# Log a message to stdout if verbose.
def verbose(msg):
    """Print *msg* to stdout only when -v/--verbose was given."""
    if not args.verbose:
        return
    print(msg)
# Main
def main():
    """Script entry point: parse arguments, set up, extract, shuffle."""
    parse_args()
    init()
    run_deutex()
    process_lumps()


# Guard so importing this file (e.g. from tests) does not run the tool.
if __name__ == "__main__":
    main()
|
import math

# Hand-rolled circle constant; math.pi is more precise, but nothing below
# depends on the value.
pi = 3.14
x = 1
y = 2
z = 3

# Removed dead commented-out experiments (round, math.ceil, math.floor, abs,
# pow, math.sqrt) -- version control is the place to keep them.
print(max(x, y, z))
print(min(x, y, z))
from tkinter import *
class Application(Frame):
    """Minimal Tk window: a greeting label and a Bye button that quits."""

    def __init__(self, master=None):
        """Build the widget tree and pack this frame into *master*."""
        Frame.__init__(self, master)
        self.msg = Label(self, text="Hello World")
        self.msg.pack()
        self.bye = Button(self, text="Bye", command=self.quit)
        self.bye.pack()
        # BUG FIX: the original read `self.pack` without calling it, so the
        # frame itself was never packed and the window appeared empty.
        self.pack()
# Run the GUI only when executed as a script (Application() creates the
# default Tk root when no master is given).
if __name__ == "__main__":
    app = Application()
    app.mainloop()
|
import subprocess
import signal
import string
import random
import re
import json
import time
import os
import socket
import requests
import logging
import sys
import base64
import yaml
from flask import Flask, request, send_from_directory, jsonify, render_template, redirect
# Flask app serving the captive-portal UI; static files are served from "/".
app = Flask(__name__, static_url_path='')
# Log to /var/log/turnkey.log and mirror everything to stdout at DEBUG level.
logfile = '/var/log/turnkey.log'
FORMAT = '%(asctime)-15s - %(levelname)s - %(message)s'
logging.basicConfig(filename=logfile, format=FORMAT, level=logging.DEBUG)
logger = logging.getLogger('turnkey')
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.DEBUG)
stdout_handler.setFormatter(logging.Formatter(FORMAT))
logger.addHandler(stdout_handler)
logger.info("****************** Begin turnkey startup.py ******************")
# Run relative paths (templates, static files) from this file's directory.
currentdir = os.path.dirname(os.path.abspath(__file__))
os.chdir(currentdir)
# Module-level state shared with the request handlers below.
project='none'
uid = ''
def getssid():
    """Return the list of wifi SSIDs previously scanned into ssid.list.

    BUG FIX: ``split('\\n')`` on a file that ends with a newline produced a
    trailing empty string, which showed up as a blank SSID option in the UI;
    blank lines are now filtered out.
    """
    logger.debug('entered getssid()')
    with open("/var/lib/rancher/turnkey/ssid.list", 'r') as f:
        ssids = f.read()
    ssid_list = [line for line in ssids.split('\n') if line]
    logger.debug(ssid_list)
    return ssid_list
def getProjectList():
    """Return the installable projects as [id, description] pairs."""
    # TODO: Read this list from a configmap and bind it to actual
    # installation jobs.
    return [
        ['k3s', 'Lightweight Kubernetes Cluster'],
        ['Rancher', 'Rancher Management Server'],
        ['k3os', 'An OS optimized for container orchestration'],
    ]
def getUniqueId():
    """Read this device's unique identifier from /etc/machine-id."""
    with open('/etc/machine-id', 'r') as f:
        uid = f.readline()
    logger.debug("unique machine id: " + uid)
    return uid
def writeWPAConfig(ssid, passphrase):
    """Render a wpa_supplicant config for *ssid* and drop it into the k3s
    manifests directory as a base64-encoded ConfigMap payload.

    :param ssid: network name selected in the portal form.
    :param passphrase: wifi password; empty string means an open AP.
    """
    logger.debug("inside writeWPAConfig()")
    if passphrase == "":
        # Open AP -- no credentials.  BUG FIX: the original emitted the bare
        # SSID with no `ssid="..."` key (wpa_supplicant requires one) and left
        # a dead `passphrase = "key_mgmt=NONE"` assignment behind.
        wpa_creds = b''.join([b'network={\n\tssid="',
                              bytearray(ssid, 'utf-8'),
                              b'"\n\tkey_mgmt=NONE\n}'])
    else:
        wpa_creds = subprocess.check_output(['/usr/bin/wpa_passphrase', ssid, passphrase])
    encoded_wpa = base64.b64encode(wpa_creds)
    # Template the encoded credentials into the connect-wifi manifest.
    with open('/app/wpa_supplicant/connect-wifi.yaml', 'r') as f:
        connect_yaml = yaml.safe_load(f)
    connect_yaml['data'] = {'wpa_supplicant.conf': encoded_wpa.decode('utf-8')}
    with open('/var/lib/rancher/k3s/server/manifests/connect-wifi.yaml', 'w') as f:
        yaml.dump(connect_yaml, f)
@app.route('/')
def main():
    """Landing page: wifi network + project selection form."""
    logger.debug('entered main()')
    # zip(*...) transposes [[id, desc], ...] into (ids, descs); next() yields
    # the tuple of project ids for the template.
    projects = zip(*getProjectList())
    # TODO: UPDATE THIS TO REFLECT ACTUAL CONTACT METHOD (SMS?)
    return render_template('index.html', ssids=getssid(), projectIDs=next(projects), message="<H3>Select a wifi network to use with this device.</H3>")

# Captive portal when connected with iOS or Android
@app.route('/generate_204')
def redirect204():
    """Android connectivity-check endpoint; bounce clients to the portal."""
    logger.debug('entered redirect204()')
    return redirect("http://192.168.4.1", code=302)

@app.route('/hotspot-detect.html')
def applecaptive():
    """iOS/macOS captive-portal probe; bounce clients to the portal."""
    logger.debug('entered applecaptive()')
    return redirect("http://192.168.4.1", code=302)

# Not working for Windows, needs work!
@app.route('/ncsi.txt')
def windowscaptive():
    """Windows NCSI probe; bounce clients to the portal."""
    logger.debug('entered windowscaptive()')
    return redirect("http://192.168.4.1", code=302)

@app.route('/static/<path:path>')
def send_static(path):
    """Serve files from the static/ folder."""
    logger.debug('entered send_static()')
    return send_from_directory('static', path)
@app.route('/signin', methods=['POST'])
def signin():
    """Handle the portal form: remember the chosen project and write the
    wifi credentials manifest, then show the restart confirmation page."""
    global project
    logger.debug('entered signin()')
    ssid = request.form['ssid']
    project = request.form['projectIDs']
    pwd = request.form['password']
    logger.debug(ssid)
    writeWPAConfig(ssid, pwd)
    # TODO: UPDATE THIS MESSAGE BASED ON THE CONTACT METHOD USED (SMS?)
    return render_template('restart.html', message="This device is configured to run " + project + ". Click the button below to connect this device to the " + ssid + " network.")
@app.route('/restart', methods=['POST'])
def restart():
    """Trigger the wifi switch-over and mark the portal UI as down."""
    # bring up wifi: copying the manifest into k3s's manifests directory is
    # what makes k3s apply it
    with open('/app/action/wifi.yaml', 'r') as f:
        wifi_yaml = yaml.safe_load(f)
    with open('/var/lib/rancher/k3s/server/manifests/wifi.yaml', 'w') as f:
        yaml.dump(wifi_yaml, f)
    # set status down
    with open('/var/lib/rancher/turnkey/status', 'w') as f:
        f.write('down')
    return render_template('project-info.html', message="<br>After a few minutes, you can login to this device with ssh to the host <b>raspberrypi</b>. <br></p><li>user: <b>pi</b></li> <br><li>password: <b>raspberry</b></li><br>Once logged in, you will find your kubeconfig file is available at <code>/home/pi/.kube/config</code>")
def runapp():
    """Serve the portal on all interfaces, port 80 (requires root)."""
    app.run(host="0.0.0.0", port=80, threaded=True)

if __name__ == "__main__":
    uid = getUniqueId()
    # write out the ui status
    # status can be one of [up|down|sleep]
    with open('/var/lib/rancher/turnkey/status', 'w') as f:
        f.write('up')
    # fire up the input form
    runapp()
|
# Base URL of the JSONPlaceholder fake REST API's /todos resource.
todo_api_path: str = "https://jsonplaceholder.typicode.com/todos/"
|
import os
from typing import List, Tuple
import cv2
import numpy as np
import pickle5 as pickle
from tqdm import tqdm
from mmhuman3d.core.conventions.keypoints_mapping import convert_kps
from mmhuman3d.data.data_structures.human_data import HumanData
from .base_converter import BaseModeConverter
from .builder import DATA_CONVERTERS
@DATA_CONVERTERS.register_module()
class AgoraConverter(BaseModeConverter):
    """AGORA dataset

    `AGORA: Avatars in Geography Optimized for Regression Analysis' CVPR`2021
    More details can be found in the `paper
    <https://arxiv.org/pdf/2104.14643.pdf>`__.

    Args:
        modes (list): 'validation' or 'train' for accepted modes
        fit (str): 'smpl' or 'smplx' for available body model fits
        res (tuple): (1280, 720) or (3840, 2160) for available image resolution
    """
    ACCEPTED_MODES = ['validation', 'train']

    def __init__(self, modes: List = [], fit: str = 'smpl',
                 res: Tuple[int, int] = (1280, 720)) -> None:  # yapf: disable
        """Validate and store the converter configuration.

        Raises:
            ValueError: for an unknown fit or an unsupported resolution.
        """
        # NOTE(review): `modes: List = []` is a shared mutable default; this
        # is safe only if BaseModeConverter never mutates it -- consider
        # switching to `None` + an explicit `[]`.
        super(AgoraConverter, self).__init__(modes)
        accepted_fits = ['smpl', 'smplx']
        if fit not in accepted_fits:
            raise ValueError('Input fit not in accepted fits. \
                Use either smpl or smplx')
        self.fit = fit
        accepted_res = [(1280, 720), (3840, 2160)]
        if res not in accepted_res:
            raise ValueError('Input resolution not in accepted resolution. \
                Use either (1280, 720) or (3840, 2160)')
        self.res = res

    def get_global_orient(self,
                          imgPath,
                          df,
                          i,
                          pNum,
                          globalOrient=None,
                          meanPose=True):
        """Modified from https://github.com/pixelite1201/agora_evaluation/blob/
        master/agora_evaluation/projection.py specific to AGORA.

        Args:
            imgPath: image path
            df: annotation dataframe
            i: frame index
            pNum: person index
            globalOrient: original global orientation
            meanPose: Store True for mean pose from vposer

        Returns:
            globalOrient: rotated global orientation
        """
        # Camera yaw/pitch are implied by the scene encoded in the file name;
        # scenes not matched below read the yaw from the dataframe instead.
        if 'hdri' in imgPath:
            camYaw = 0
            camPitch = 0
        elif 'cam00' in imgPath:
            camYaw = 135
            camPitch = 30
        elif 'cam01' in imgPath:
            camYaw = -135
            camPitch = 30
        elif 'cam02' in imgPath:
            camYaw = -45
            camPitch = 30
        elif 'cam03' in imgPath:
            camYaw = 45
            camPitch = 30
        elif 'ag2' in imgPath:
            camYaw = 0
            camPitch = 15
        else:
            camYaw = df.iloc[i]['camYaw']
            camPitch = 0
        if meanPose:
            yawSMPL = 0
        else:
            yawSMPL = df.iloc[i]['Yaw'][pNum]
        # scans have a 90deg rotation, but for mean pose from vposer there is
        # no such rotation
        if meanPose:
            rotMat, _ = cv2.Rodrigues(
                np.array([[0, (yawSMPL) / 180 * np.pi, 0]], dtype=float))
        else:
            rotMat, _ = cv2.Rodrigues(
                np.array([[0, ((yawSMPL - 90) / 180) * np.pi, 0]],
                         dtype=float))
        camera_rotationMatrix, _ = cv2.Rodrigues(
            np.array([0, ((-camYaw) / 180) * np.pi, 0]).reshape(3, 1))
        camera_rotationMatrix2, _ = cv2.Rodrigues(
            np.array([camPitch / 180 * np.pi, 0, 0]).reshape(3, 1))
        # flip pose: rotate the root orientation by 180 degrees about x
        R_mod = cv2.Rodrigues(np.array([np.pi, 0, 0]))[0]
        R_root = cv2.Rodrigues(globalOrient.reshape(-1))[0]
        new_root = R_root.dot(R_mod)
        globalOrient = cv2.Rodrigues(new_root)[0].reshape(3)
        # apply camera matrices
        globalOrient = self.rotate_global_orient(rotMat, globalOrient)
        globalOrient = self.rotate_global_orient(camera_rotationMatrix,
                                                 globalOrient)
        globalOrient = self.rotate_global_orient(camera_rotationMatrix2,
                                                 globalOrient)
        return globalOrient

    @staticmethod
    def rotate_global_orient(rotMat, global_orient):
        """Transform global orientation given rotation matrix.

        Converts the axis-angle `global_orient` to a rotation matrix,
        left-multiplies by `rotMat`, and converts back to axis-angle.

        Args:
            rotMat: rotation matrix
            global_orient: original global orientation (axis-angle)

        Returns:
            new_global_orient: transformed global orientation (axis-angle)
        """
        new_global_orient = cv2.Rodrigues(
            np.dot(rotMat,
                   cv2.Rodrigues(global_orient.reshape(-1))[0]))[0].T[0]
        return new_global_orient

    def convert_by_mode(self, dataset_path: str, out_path: str,
                        mode: str) -> dict:
        """
        Args:
            dataset_path (str): Path to directory where raw images and
                annotations are stored.
            out_path (str): Path to directory to save preprocessed npz file
            mode (str): Mode in accepted modes

        Returns:
            dict:
                A dict containing keys image_path, bbox_xywh, keypoints2d,
                keypoints3d, keypoints2d_mask, keypoints3d_mask, meta
                stored in HumanData() format

        NOTE(review): in its current state this method does NOT produce the
        HumanData output described above -- the aggregation code below is
        commented out, the per-person/per-frame loops are cut short by
        `break` statements, and only a temporary .npy of concatenated SMPL
        parameters is written.  This looks like leftover debug code; confirm
        before relying on it.
        """
        # use HumanData to store all data
        human_data = HumanData()
        # structs we use
        image_path_, bbox_xywh_, keypoints2d_, keypoints3d_ = [], [], [], []
        # get a list of .pkl files in the directory
        img_path = os.path.join(dataset_path, 'images', mode)
        body_model = {}
        body_model['body_pose'] = []
        body_model['global_orient'] = []
        body_model['betas'] = []
        body_model['transl'] = []
        if self.fit == 'smplx':
            annot_path = os.path.join(dataset_path, 'camera_dataframe')
            body_model['left_hand_pose'] = []
            body_model['right_hand_pose'] = []
            body_model['expression'] = []
            body_model['leye_pose'] = []
            body_model['reye_pose'] = []
            body_model['jaw_pose'] = []
            num_keypoints = 127
            keypoints_convention = 'agora'
            num_body_pose = 21
        else:
            annot_path = os.path.join(dataset_path, 'camera_dataframe_smpl')
            num_keypoints = 45
            keypoints_convention = 'smpl_45'
            num_body_pose = 23
        meta = {}
        meta['gender'] = []
        meta['age'] = []
        meta['ethnicity'] = []
        meta['kid'] = []
        meta['occlusion'] = []
        # go through all the .pkl files matching this mode
        annot_dataframes = [
            os.path.join(annot_path, f) for f in os.listdir(annot_path)
            if f.endswith('.pkl') and '{}'.format(mode) in f
        ]
        spin_fits = []
        for filename in tqdm(sorted(annot_dataframes)):
            df = pickle.load(open(filename, 'rb'))
            for idx in tqdm(range(len(df))):
                imgname = df.iloc[idx]['imgPath']
                if self.res == (1280, 720):
                    imgname = imgname.replace('.png', '_1280x720.png')
                img_path = os.path.join('images', mode, imgname)
                valid_pers_idx = np.where(df.iloc[idx].at['isValid'])[0]
                for pidx in valid_pers_idx:
                    # obtain meta data
                    gender = df.iloc[idx]['gender'][pidx]
                    age = df.iloc[idx]['age'][pidx]
                    kid = df.iloc[idx]['kid'][pidx]
                    occlusion = df.iloc[idx]['occlusion'][pidx]
                    ethnicity = df.iloc[idx]['ethnicity'][pidx]
                    # obtain keypoints
                    keypoints2d = df.iloc[idx]['gt_joints_2d'][pidx]
                    if self.res == (1280, 720):
                        keypoints2d *= (720 / 2160)
                    keypoints3d = df.iloc[idx]['gt_joints_3d'][pidx]
                    gt_bodymodel_path = os.path.join(
                        dataset_path,
                        df.iloc[idx][f'gt_path_{self.fit}'][pidx])
                    gt_bodymodel_path = gt_bodymodel_path.replace(
                        '.obj', '.pkl')
                    ann = pickle.load(open(gt_bodymodel_path, 'rb'))
                    if self.fit == 'smplx':
                        # obtain smplx data
                        body_model['body_pose'].append(ann['body_pose'])
                        global_orient = ann['global_orient']
                        body_model['betas'].append(
                            ann['betas'].reshape(-1)[:10])
                        body_model['transl'].append(ann['transl'])
                        body_model['left_hand_pose'].append(
                            ann['left_hand_pose'])
                        body_model['right_hand_pose'].append(
                            ann['right_hand_pose'])
                        body_model['jaw_pose'].append(ann['jaw_pose'])
                        body_model['leye_pose'].append(ann['leye_pose'])
                        body_model['reye_pose'].append(ann['reye_pose'])
                        body_model['expression'].append(ann['expression'])
                    else:
                        # obtain smpl data
                        body_model['body_pose'].append(
                            ann['body_pose'].cpu().detach().numpy())
                        global_orient = ann['root_pose'].cpu().detach().numpy()
                        body_model['betas'].append(
                            ann['betas'].cpu().detach().numpy().reshape(
                                -1)[:10])
                        body_model['transl'].append(
                            ann['translation'].cpu().detach().numpy())
                    # print(len(global_orient.reshape(-1)))
                    # print(len(ann['body_pose'].reshape(-1)))
                    # print(len(ann['betas'].reshape(-1)[:10]))
                    # raise '111'
                    global_orient = self.get_global_orient(
                        img_path, df, idx, pidx, global_orient.reshape(-1))
                    spin_fits.append(np.concatenate(
                        [global_orient.reshape(-1), ann['body_pose'].reshape(-1),
                         ann['betas'].reshape(-1)[:10]], axis=0))
                    # NOTE(review): keeps only the FIRST valid person of the
                    # frame -- looks like debug truncation.
                    break
                # NOTE(review): keeps only the FIRST frame of each dataframe
                # -- looks like debug truncation.
                break
        # # add confidence column
        # keypoints2d = np.hstack(
        #     [keypoints2d, np.ones((num_keypoints, 1))])
        # keypoints3d = np.hstack(
        #     [keypoints3d, np.ones((num_keypoints, 1))])
        #
        # bbox_xyxy = [
        #     min(keypoints2d[:, 0]),
        #     min(keypoints2d[:, 1]),
        #     max(keypoints2d[:, 0]),
        #     max(keypoints2d[:, 1])
        # ]
        # bbox_xyxy = self._bbox_expand(bbox_xyxy, scale_factor=1.2)
        # bbox_xywh = self._xyxy2xywh(bbox_xyxy)
        #
        # keypoints2d_.append(keypoints2d)
        # keypoints3d_.append(keypoints3d)
        # bbox_xywh_.append(bbox_xywh)
        # image_path_.append(img_path)
        # body_model['global_orient'].append(global_orient)
        # meta['gender'].append(gender)
        # meta['age'].append(age)
        # meta['kid'].append(kid)
        # meta['occlusion'].append(occlusion)
        # meta['ethnicity'].append(ethnicity)
        # # change list to np array
        # if self.fit == 'smplx':
        #     body_model['left_hand_pose'] = np.array(
        #         body_model['left_hand_pose']).reshape((-1, 15, 3))
        #     body_model['right_hand_pose'] = np.array(
        #         body_model['right_hand_pose']).reshape((-1, 15, 3))
        #     body_model['expression'] = np.array(
        #         body_model['expression']).reshape((-1, 10))
        #     body_model['leye_pose'] = np.array(
        #         body_model['leye_pose']).reshape((-1, 3))
        #     body_model['reye_pose'] = np.array(
        #         body_model['reye_pose']).reshape((-1, 3))
        #     body_model['jaw_pose'] = np.array(body_model['jaw_pose']).reshape(
        #         (-1, 3))
        #
        # body_model['body_pose'] = np.array(body_model['body_pose']).reshape(
        #     (-1, num_body_pose, 3))
        # body_model['global_orient'] = np.array(
        #     body_model['global_orient']).reshape((-1, 3))
        # body_model['betas'] = np.array(body_model['betas']).reshape((-1, 10))
        # body_model['transl'] = np.array(body_model['transl']).reshape((-1, 3))
        #
        # meta['gender'] = np.array(meta['gender'])
        # meta['age'] = np.array(meta['age'])
        # meta['kid'] = np.array(meta['kid'])
        # meta['occlusion'] = np.array(meta['occlusion'])
        # meta['ethnicity'] = np.array(meta['ethnicity'])
        #
        # bbox_xywh_ = np.array(bbox_xywh_).reshape((-1, 4))
        # bbox_xywh_ = np.hstack([bbox_xywh_, np.ones([bbox_xywh_.shape[0], 1])])
        #
        # # change list to np array
        # keypoints2d_ = np.array(keypoints2d_).reshape((-1, num_keypoints, 3))
        # keypoints2d_, mask = convert_kps(keypoints2d_, keypoints_convention,
        #                                  'human_data')
        # keypoints3d_ = np.array(keypoints3d_).reshape((-1, num_keypoints, 4))
        # keypoints3d_, _ = convert_kps(keypoints3d_, keypoints_convention,
        #                               'human_data')
        #
        # human_data['image_path'] = image_path_
        # human_data['bbox_xywh'] = bbox_xywh_
        # human_data['keypoints2d_mask'] = mask
        # human_data['keypoints3d_mask'] = mask
        # human_data['keypoints2d'] = keypoints2d_
        # human_data['keypoints3d'] = keypoints3d_
        # human_data['meta'] = meta
        # human_data['config'] = 'agora'
        # if self.fit == 'smplx':
        #     human_data['smplx'] = body_model
        # else:
        #     human_data['smpl'] = body_model
        # human_data.compress_keypoints_by_mask()
        #
        # # store data
        # if not os.path.isdir(out_path):
        #     os.makedirs(out_path)
        #
        # file_name = f'agora_{mode}_{self.fit}.npz'
        # out_file = os.path.join(out_path, file_name)
        # human_data.dump(out_file)
        # NOTE(review): temporary output -- see the docstring note above.
        np.save(os.path.join(out_path, 'agora_fits_smpl_temp.npy'), np.array(spin_fits))
from app import db
from models import User
# Seed the database with a default admin user.
# NOTE(review): the hash looks like a raw SHA-256 hex digest -- verify the
# User model really compares raw digests (werkzeug-style hashes embed the
# method and salt).
u = User(username='admin', email='admin@example.com',
         password_hash='b3282a2f2a28757b3a18ab833de16a9c54518c0b0cf493e3f0a7cf09386f326a')
db.session.add(u)
db.session.commit()
|
##################################################################
# R E A D M E
##################################################################
'''
Methods to launch this script:
- from command line
- three main parameters: -i, -o, -l
-i specifies the input file
- if -i is not used, all files in the current directory and all subdirectories (except _LUTs and _OUTPUT folders) will be used
-o specifies where to output
- if -o is not used, the output file path will automatically be the same as the input, under the _OUTPUT folder
-l specifies the LUT used (from _LUTs folder)
- if -l is not used, lut-day.png is used
version 1.0
2019-11-08
'''
import argparse
import sys
import os
import math
from PIL import Image
def load_lut(lut_filename):
    """Load a 16x16x16 colour LUT image into the global `lut_3d` dict.

    The LUT image is 16 tiles of 16x16 laid out horizontally: entry
    (x, y, z) maps to pixel (z*16 + x, y).  Keys are str((x, y, z)) tuples.
    When *lut_filename* is not an existing file, it is looked up inside the
    _LUTs folder instead.
    """
    global lut_3d
    lut_3d = {}
    # read the LUT image (idiom fix: `not isfile` instead of `== False`)
    if not os.path.isfile(lut_filename):
        lut = Image.open(os.path.join('_LUTs', lut_filename))
    else:
        lut = Image.open(lut_filename)
    # populate the LUT dictionary (removed an unused pixel counter and a dead
    # commented-out debug dump that followed)
    for y in range(lut.height):
        for x in range(lut.width):
            coord = (x % 16, y, math.floor(x / 16))
            lut_3d[str(coord)] = lut.getpixel((x, y))
def lerp_color(color1, color2, factor):
    """Linearly interpolate the RGB channels of two colours.

    factor 0.0 returns color1's RGB, 1.0 returns color2's RGB; any extra
    channels (e.g. alpha) are ignored and a 3-tuple is returned.
    """
    return tuple(color1[channel] + (color2[channel] - color1[channel]) * factor
                 for channel in range(3))
def lerp_plane(r_min, r_max, g_min, g_max, b, input):
    """Bilinearly interpolate the LUT over a constant-blue plane.

    The four cage corners (in LUT-cell coordinates) are looked up in the
    global `lut_3d`, then interpolated along red and green using the pixel's
    fractional position inside its 17-value-wide cell.
    """
    corner00 = lut_3d[str((r_min, g_min, b))]
    corner01 = lut_3d[str((r_max, g_min, b))]
    corner10 = lut_3d[str((r_min, g_max, b))]
    corner11 = lut_3d[str((r_max, g_max, b))]
    r_factor = (input[0] - (r_min * 17)) / 17
    g_factor = (input[1] - (g_min * 17)) / 17
    low_edge = lerp_color(corner00, corner01, r_factor)
    high_edge = lerp_color(corner10, corner11, r_factor)
    return lerp_color(low_edge, high_edge, g_factor)
def process_image(input_image_path, output_image_path):
    """Apply the loaded LUT to one image and save the result.

    Requires load_lut() to have populated the global `lut_3d` first.
    Output is RGBA; fully transparent pixels are left untouched.
    """
    input_img = Image.open(input_image_path)
    output_img = Image.new('RGBA', (input_img.width, input_img.height), color = (0,0,0,0))
    for y in range(0, input_img.height):
        for x in range(0, input_img.width):
            # get RGB of pixel
            if input_img.mode != 'RGBA':
                input_img = input_img.convert('RGBA')
            input_pix = input_img.getpixel( (x, y) )
            input_R = input_pix[0]
            input_G = input_pix[1]
            input_B = input_pix[2]
            alpha = input_pix[3]
            if alpha != 0:
                # find the surrounding LUT cage (cells are 17 values wide,
                # since 255 spans 15 steps of the 16-entry axis)
                cage_R_min = math.floor( input_R/17 )
                cage_R_max = math.ceil( input_R/17 )
                cage_G_min = math.floor( input_G/17 )
                cage_G_max = math.ceil( input_G/17 )
                cage_B_min = math.floor( input_B/17 )
                cage_B_max = math.ceil( input_B/17 )
                B_interpolate = ( input_B - (cage_B_min*17) )/17
                # trilinear interpolation: two bilinear planes, then a final
                # lerp across the blue axis
                plane0 = lerp_plane(cage_R_min, cage_R_max, cage_G_min, cage_G_max, cage_B_min, input_pix)
                plane1 = lerp_plane(cage_R_min, cage_R_max, cage_G_min, cage_G_max, cage_B_max, input_pix)
                output = lerp_color(plane0, plane1, B_interpolate)
                output_RGBA = ( int(output[0]), int(output[1]), int(output[2]), alpha)
                # put pixel in output image
                output_img.putpixel( (x,y) , output_RGBA)
    # save output image, creating the destination folder if needed
    output_folder = os.path.dirname(output_image_path)
    os.makedirs(output_folder, exist_ok = True)
    output_img.save(output_image_path)
    print('Saved', output_image_path)
def run(options):
    """Apply the selected LUT to one file, or to every PNG below the script's
    own directory (skipping _LUTs and _OUTPUT) when no input is given.

    options keys: 'input' (file path, or '' for walk mode), 'output' (path,
    or '' to mirror the input under _OUTPUT), 'lut' (LUT filename).
    """
    lut_filename = options['lut']
    load_lut(lut_filename)
    if options['input'] != '':
        input_filename = options['input']
        output_filename = options['output']
        if options['output'] == '':
            # Default output mirrors the input path under _OUTPUT.
            output_filename = os.path.join('_OUTPUT', input_filename)
        process_image(input_filename, output_filename)
    else:
        # walk through the directory and process all PNG files
        current_script_path = os.path.realpath(__file__)
        current_script_directory = os.path.dirname(current_script_path)
        print('script_directory:', current_script_directory)
        walked_instruction_list = []
        for root, dirs, files in os.walk(current_script_directory):
            for f in files:
                # skip the LUT sources and any previous outputs
                if '_LUTs' not in root:
                    if '_OUTPUT' not in root:
                        if f[-4:].lower() == '.png':
                            walked_instruction_list.append( os.path.join(root, f) )
        for i in walked_instruction_list:
            input_path = i
            # rebuild each path relative to the script dir, rooted at _OUTPUT
            relative_input_path_i = i
            relative_input_path = relative_input_path_i.replace(current_script_directory, '')[1:]
            relative_output_path = os.path.join('_OUTPUT', relative_input_path)
            process_image(input_path, relative_output_path)
        print('Walked instructions detected:\n', walked_instruction_list)
# Entry point: with 3+ argv entries use argparse (-i/-o/-l); otherwise treat
# an optional single argument as the input file and fall back to defaults.
if __name__ == '__main__':
    if len(sys.argv) > 2:
        parser = argparse.ArgumentParser(description = 'Process some arguments.')
        parser.add_argument('-i','--input',
            help='File to process.'
        )
        parser.add_argument('-o','--output',
            help='File to output.'
        )
        parser.add_argument('-l','--lut',
            help='Override which LUT file to use.'
        )
        options = vars(parser.parse_args())
        # Fill in defaults for any argument the user omitted.
        default_values = [
            ('input', ''),
            ('output', ''),
            ('lut', 'lut-day.png')
        ]
        for name, def_value in default_values:
            if not options[name]:
                options[name] = def_value
    else:
        input_var = ''
        if len(sys.argv) > 1:
            input_var = sys.argv[1]
        options = {
            'input' : input_var,
            'output': '',
            'lut' : 'lut-day.png'
        }
    run(options)
|
from os import listdir, makedirs
from os.path import isfile, isdir, join, exists, dirname
def file_listing(dir, extension=None):
    '''
    List all files (exclude dirs) in specified directory with (optional) given extension
    Args:
        dir (string): Directory full path
        extension (string): (optional) Extension of files to filter
    '''
    entries = (join(dir, name) for name in listdir(dir))
    files = [entry for entry in entries if isfile(entry)]
    if extension:
        suffix = '.' + extension
        files = [f for f in files if f.endswith(suffix)]
    return files
def dir_listing(base_dir):
    '''
    List all subdirectories of given base dir
    Args:
        base_dir (string): Base directory to search
    '''
    subdirs = []
    for entry in listdir(base_dir):
        full_path = join(base_dir, entry)
        if isdir(full_path):
            subdirs.append(full_path)
    return subdirs
def mkdir(path):
    '''
    Make directory in specified path *recursively*
    Args:
        path (string): Directory path to create
    '''
    # exist_ok avoids the check-then-create race of the previous
    # `if not exists(path): makedirs(path)` pattern.
    makedirs(path, exist_ok=True)
def last_component(path):
    '''Get paths last component (split by `/`) including file extension'''
    parts = [part for part in path.split('/') if part]
    return parts[-1]
def file_exists(path):
    '''Check whether path refers to an existing regular file (directories return False)'''
    # Uses isfile, not exists: the old docstring overstated what is checked.
    return isfile(path)
def relative_path(path):
    '''Get path relative to file itself instead of dir program was started in'''
    return join(dirname(__file__), path)
def get_file_name(filepath):
    '''Get file name - excluding file extension'''
    # Strip only the final extension. The previous `split('.')[-2]` raised
    # IndexError on names without a dot and returned the wrong piece for
    # multi-dot names such as `a.b.txt`.
    return last_component(filepath).rsplit('.', 1)[0]
def take(gen, n):
    '''
    Take n elements from generator
    Args:
        gen (iterable): Generator instance
        n (int): Number of elements to take
    Returns:
        List of values from generator (fewer than n if it is exhausted first)
    '''
    from itertools import islice
    # islice stops cleanly at the end of the iterable, whereas the previous
    # manual next() loop let StopIteration escape on short inputs.
    return list(islice(gen, n))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 07:16:35 2018
@author: MiguelArturo
"""
__author__ = "Miguel Barreto Sanz"
__copyright__ = "Copyright 2018, Miguel Barreto Sanz"
__credits__ = ["Miguel Barreto Sanz"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Miguel Barreto Sanz"
__email__ = "miguelbarretosanz@gmail.com"
__status__ = "Development"
from math import log, sqrt
import numpy as np
import pandas as pd
from bokeh.plotting import figure
#Import modules for interactive graphics
from bokeh.layouts import row, column
from bokeh.models import HoverTool, ColumnDataSource, Select
from bokeh.io import curdoc
#Import modules for conversions to radians.
import math
#Import modules for time management and time zones
import time, datetime
import pytz
from pytz import timezone
#Color palette
import seaborn as sns
def make_plot(source):
    """
    Plot the annular wedges of one day's activities on a 24-hour clock face.
    Parameters
    ----------
    source : ColumnDataSource
        Must provide columns inner_radius, outer_radius, start_angle,
        end_angle, color, Name and Time_Zone (built by get_dataset).
    Returns
    -------
    return : Figure
    """
    # Restrict hover (via `names`) to the activity wedges so the grey
    # clock-face decorations below do not produce tooltips.
    hover = HoverTool(
        names=["anular_wedges"],
        tooltips=[
            ("Activity", "@Name"),
            ("color", "@color"),
            ("Time Zone","@Time_Zone"),
        ])
    plot = figure(width=700, height=700,tools=[hover], title="",x_axis_type=None, y_axis_type=None, x_range=(-420, 420), y_range=(-420, 420),
                  min_border=0, outline_line_color="white", background_fill_color="#ffffff",)
    # One annular wedge per activity row; geometry and colour come from `source`.
    plot.annular_wedge(x=0, y=0, inner_radius='inner_radius', outer_radius='outer_radius',start_angle='start_angle', end_angle='end_angle',
                       color='color', alpha=0.9, hover_color='color',hover_line_color="black", hover_alpha = 0.5, source=source,name="anular_wedges",legend='Name')
    #Fixed attributes
    plot.xgrid.grid_line_color = None
    plot.ygrid.grid_line_color = None
    #plot clock: 24 tick angles, one per hour; zero-width wedges (start ==
    # end angle) render as radial tick lines.
    angles = 2*np.pi/24*pd.Series(list(range(0,24)))
    plot.annular_wedge(0, 0, fr_inner_radius, tr_outer_radius, angles, angles, color="lightgrey")
    # Plot clock labels (24 hours)
    # NOTE(review): `labels`, minr/maxr, a, b build a log scale but only
    # radii[0] is used below — looks like leftover code from an example;
    # confirm before simplifying.
    labels = np.power(10.0, np.arange(-3, 4))
    minr = sqrt(log(.001 * 1E4))
    maxr = sqrt(log(1000 * 1E4))
    a = ((tr_outer_radius + 10) - fr_inner_radius) / (minr - maxr)
    b = fr_inner_radius - a * maxr
    radii = a * np.sqrt(np.log(labels * 1E4)) + b
    # Label positions on a circle of radius radii[0].
    xr = radii[0]*np.cos(np.array(angles))
    yr = radii[0]*np.sin(np.array(angles))
    label_angle=np.array(angles)
    label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
    # Hour numbers ordered so the dial reads like a 24h clock.
    labels_24h_clock = list(range(6,-1,-1)) + list(range(23,6,-1))
    plot.text(xr, yr, pd.Series(labels_24h_clock), angle=label_angle, text_font_size="9pt", text_align="center", text_baseline="middle",
              text_color="lightgrey")
    return plot
def get_dataset (src, unique_days_list, selected_day, df_activity_colors):
    """Build the ColumnDataSource describing every activity wedge of one day.

    Parameters
    ----------
    src : pandas.DataFrame
        The Life Cycle export (the caller always passes the global LC_data).
    unique_days_list : pandas.Series
        Start date (as datetime.date) of every row of src.
    selected_day : str
        Day to display, formatted '%Y-%m-%d'.
    df_activity_colors : pandas.DataFrame
        Activity-name -> hex-color lookup (see activities_color_table).

    Returns
    -------
    bokeh.models.ColumnDataSource
        Columns: start_angle, end_angle, color, inner_radius, outer_radius,
        Name, Time_Zone — one row per event of the selected day.
    """
    def calculate_angles(start_time, duration, start_date, end_date, selected_day):
        # Events fully contained in the selected day become clock-face
        # angles; anything else gets a fixed placeholder wedge (1.4..1.5 rad).
        if start_date == selected_day and end_date == selected_day:
            #Convert HH:MM:SS format in radians (1 hour = 15 degrees)
            ts = time.strptime(start_time, "%H:%M:%S")
            hour = (ts[3] + (ts[4]/60) + (ts[5]/3600))
            hour_rad = math.radians(hour * 15.0)
            #add "pi/2" to transform radians to a 24 hours clock form.
            hour_in_radians_to_plot = -hour_rad + np.pi/2
            #Use duration and convert seconds in radians
            # NOTE(review): time.gmtime wraps at 24h, so durations >= 1 day
            # would fold over — presumably events never last that long; confirm.
            sec_rad = time.gmtime(duration)
            hour_duration = (sec_rad[3] + (sec_rad[4]/60) + (sec_rad[5]/3600))
            hour_rad_duration = math.radians(hour_duration * 15.0)
            duration_in_radians_to_plot = (hour_in_radians_to_plot + hour_rad_duration)
            start_angle= hour_in_radians_to_plot - hour_rad_duration
            end_angle= duration_in_radians_to_plot - hour_rad_duration
        else:
            start_angle = 1.4
            end_angle = 1.5
        return start_angle, end_angle
    def convert_time_zone(in_time_zone):
        # Map the abbreviation found in the CSV ("GMT+2", "CET", ...) to an
        # IANA zone name understood by pytz.
        # NOTE(review): when no branch matches, `time_zone` is never bound and
        # the return raises UnboundLocalError right after the print; "GMT-3"
        # is also absent from the mapping — confirm and extend.
        if in_time_zone == "GMT+0":
            time_zone = "Africa/Casablanca"
        elif in_time_zone == "GMT+1":
            time_zone = "Europe/Amsterdam"
        elif in_time_zone == "CET":
            time_zone = "CET"
        elif in_time_zone == "CEST":
            time_zone = "Africa/Johannesburg"
        elif in_time_zone == "GMT+10":
            time_zone = "Australia/Brisbane"
        elif in_time_zone == "GMT+11":
            time_zone = "Etc/GMT+11"
        elif in_time_zone == "GMT+12":
            time_zone = "Etc/GMT+12"
        elif in_time_zone == "GMT+2":
            time_zone = "Africa/Johannesburg"
        elif in_time_zone == "GMT+3":
            time_zone = "Asia/Istanbul"
        elif in_time_zone == "GMT+4":
            time_zone = "Asia/Dubai"
        elif in_time_zone == "GMT+5":
            time_zone = "Asia/Aqtobe"
        elif in_time_zone == "GMT+6":
            time_zone = "Asia/Thimbu"
        elif in_time_zone == "GMT+7":
            time_zone = "Asia/Jakarta"
        elif in_time_zone == "GMT+8":
            time_zone = "Asia/Hong_Kong"
        elif in_time_zone == "GMT+9":
            time_zone = "Asia/Tokyo"
        elif in_time_zone == "GMT-0":
            time_zone = "Atlantic/St_Helena"
        elif in_time_zone == "GMT-1":
            time_zone = "Atlantic/Cape_Verde"
        elif in_time_zone == "GMT-10":
            time_zone = "Pacific/Honolulu"
        elif in_time_zone == "GMT-11":
            time_zone = "US/Samoa"
        elif in_time_zone == "GMT-2":
            time_zone = "Brazil/DeNoronha"
        elif in_time_zone == "GMT-4":
            time_zone = "America/Curacao"
        elif in_time_zone == "GMT-5":
            time_zone = "America/Cancun"
        elif in_time_zone == "GMT-6":
            time_zone = "America/Costa_Rica"
        elif in_time_zone == "GMT-7":
            time_zone = "America/Dawson_Creek"
        elif in_time_zone == "GMT-8":
            time_zone = "Pacific/Pitcairn"
        elif in_time_zone == "GMT-9":
            time_zone = "Pacific/Gambier"
        elif in_time_zone == "GMT0":
            time_zone = "Atlantic/St_Helena"
        else:
            print("No time zone found")
        return time_zone
    #Group all the events from the same day
    index_hours_same_day = np.where(unique_days_list==
                                    datetime.datetime.strptime(selected_day, "%Y-%m-%d").date())
    events_at_day = src.Start_Date_UTC[list(index_hours_same_day[0][:])]
    events_at_day = pd.to_datetime(events_at_day)
    end_time_events_at_day = src.End_Date_UTC[list(index_hours_same_day[0][:])]
    end_time_events_at_day = pd.to_datetime(end_time_events_at_day)
    #Time zone correction: timestamps in the CSV are UTC
    events_at_day = events_at_day.dt.tz_localize('UTC')
    #Get the time zone from "Start_Time_Local"
    # NOTE(review): reads the global LC_data instead of the `src` parameter —
    # works only because the caller always passes LC_data; the zone of the
    # day's FIRST event is applied to all events of the day.
    get_tz = LC_data.Start_Time_Local[index_hours_same_day[0][0]].split(" ")
    in_time_zone = get_tz[3]
    time_zone = convert_time_zone(in_time_zone)
    events_at_day = events_at_day.dt.tz_convert(time_zone)
    events_at_day.name = 'Start_Time_Local'
    #Get the time zone from "End_time_Local"
    end_time_events_at_day = end_time_events_at_day.dt.tz_localize('UTC')
    get_tz_end = LC_data.End_time_Local[index_hours_same_day[0][0]].split(" ")
    in_time_zone_end_date = get_tz_end[3]
    time_zone_end = convert_time_zone(in_time_zone_end_date)
    end_time_events_at_day = end_time_events_at_day.dt.tz_convert(time_zone_end)
    end_time_events_at_day.name = 'End_time_Local'
    #Select start time from timestamp
    start_time_list_to_plot = events_at_day.dt.time
    start_time_list_to_plot_dt = start_time_list_to_plot.to_frame()
    start_date = events_at_day.dt.date.to_frame()
    start_date.columns = ['start_date']
    #get durations and events
    # assumes column 4 is 'Duration' and column 5 is 'Name' after the rename
    # done at load time — TODO confirm if the CSV layout ever changes.
    duration_list_to_plot = src.iloc[events_at_day.index[:],[4]]
    events_list_to_plot = src.iloc[events_at_day.index[:],[5]]
    #Select end time from timestamp
    end_time_events_at_day_to_plot = end_time_events_at_day.dt.time
    end_time_events_at_day_to_plot_dt = end_time_events_at_day_to_plot.to_frame()
    end_date = end_time_events_at_day.dt.date.to_frame()
    end_date.columns = ['end_date']
    #Dataframe with time zones (one identical value per event)
    df_tz = pd.DataFrame(index=range(0,events_at_day.index.size),columns=['Time_Zone'])
    for i in range(0, events_at_day.index.size):
        df_tz['Time_Zone'][i]= time_zone
    #Dataframe with "event duration" and "start time"
    # Reset every index so the column-wise concat below aligns by position.
    duration_list_to_plot.reset_index(drop=True, inplace=True)
    events_list_to_plot.reset_index(drop=True, inplace=True)
    start_time_list_to_plot_dt.reset_index(drop=True, inplace=True)
    end_time_events_at_day_to_plot_dt.reset_index(drop=True, inplace=True)
    start_date.reset_index(drop=True, inplace=True)
    end_date.reset_index(drop=True, inplace=True)
    result2 = pd.concat([duration_list_to_plot, events_list_to_plot, start_time_list_to_plot_dt,
                         end_time_events_at_day_to_plot_dt, start_date, end_date] , axis=1)
    # Wedge angles for every event of the day.
    df_start_end_angle = pd.DataFrame(index=range(0,result2.index.size),columns=['start_angle','end_angle'])
    for i in range(0, result2.index.size):
        s_d = str(result2.iloc[i]['Start_Time_Local'])
        du = result2.iloc[i]['Duration']
        start_date = str(result2.iloc[i]['start_date'])
        end_date = str(result2.iloc[i]['end_date'])
        angles = calculate_angles(s_d, du, start_date, end_date, selected_day)
        df_start_end_angle['start_angle'][i]= angles[0]
        df_start_end_angle['end_angle'][i] = angles[1]
    # Every wedge lives on the first (inner) ring.
    df_inner_outer_radius = pd.DataFrame(index=range(0,result2.index.size),columns=['inner_radius','outer_radius'])
    for i in range(0, result2.index.size):
        df_inner_outer_radius['inner_radius'][i]= fr_inner_radius
        df_inner_outer_radius['outer_radius'][i] = fr_outer_radius
    #Match events with its respective color code
    df_colors = pd.DataFrame(index=range(0,events_list_to_plot.index.size),columns=['color'])
    for i in range(0,events_list_to_plot.index.size):
        df_colors.color[i] = df_activity_colors.Colors[np.where(events_list_to_plot.Name[i] == df_activity_colors.Activities)[0][0]]
    final_df = pd.concat([df_start_end_angle,df_colors,df_inner_outer_radius,events_list_to_plot, df_tz] , axis=1)
    return ColumnDataSource(data=final_df)
def update_plot(attrname, old, new):
    """Bokeh select callback: rebuild the data source for the newly chosen day."""
    new_source = get_dataset(LC_data, unique_days_list, select_day.value, df_activity_colors)
    source.data.update(new_source.data)
def activities_color_table (array_activities):
    """Build a lookup table assigning one fixed color per activity name.

    Parameters
    ----------
    array_activities : numpy.ndarray
        Unique activity names (e.g. ``LC_data.Name.unique()``).

    Returns
    -------
    pandas.DataFrame
        Columns 'Activities' and 'Colors', one row per activity.
    """
    # Concatenate several seaborn palettes so there are enough distinct
    # colors for every activity.
    pal2 = sns.color_palette('pastel').as_hex()
    pal3 = sns.color_palette("Set1", 10).as_hex()
    pal4 = sns.color_palette("Set2", 10).as_hex()
    pal5 = sns.color_palette("Set3", 10).as_hex()
    pal6 = sns.color_palette("BrBG", 7).as_hex()
    pal7 = sns.color_palette("RdBu_r", 7).as_hex()
    pal8 = sns.color_palette("coolwarm", 7).as_hex()
    pal9 = sns.diverging_palette(10, 220, sep=80, n=7).as_hex()
    palette = np.concatenate((pal2,pal3,pal4,pal5,pal6,pal7,pal8,pal9), axis=0)
    # Build the frame in one shot instead of chained item assignment
    # (df[col][i] = ...), which is fragile and raises SettingWithCopyWarning.
    return pd.DataFrame({
        'Activities': list(array_activities),
        'Colors': list(palette[:array_activities.size]),
    })
#Fixed plot attributes: radii (in plot units) of the three concentric rings.
fr_inner_radius = 140 #First ring (fr) parameters
fr_outer_radius = 200
sr_inner_radius = fr_outer_radius+2 #Second ring (sr) parameters
sr_outer_radius = fr_outer_radius+52
# Third ring (tr) parameters. A stray trailing comma previously made
# tr_inner_radius a 1-tuple instead of an int.
tr_inner_radius = fr_outer_radius+52+2
tr_outer_radius = fr_outer_radius+52+2+42
# Load the Life Cycle app export.
# NOTE(review): relative path — the bokeh app must be started from the
# project's source directory for this to resolve.
LC_data = pd.read_csv('../data/Life Cycle/example/LC_export 3.csv')
# Column names were changed because the originals have spaces and special
# characters that complicate string manipulation, e.g. ' NAME', 'START DATE(UTC)'.
LC_data.columns = ['Start_Date_UTC', 'End_Date_UTC','Start_Time_Local','End_time_Local','Duration','Name','Location']
#Convert 'Start_Date' to datetime64[ns] to use pandas Time Series / Date functionality.
#To-do : the function "to_datetime" is converting 'Start_Time_Local' to UTC; I want to keep it in local time.
LC_data['Start_Date_UTC'] = pd.to_datetime(LC_data.Start_Date_UTC)
#Get all the events' timestamps per unique selected day
unique_days_list = LC_data.Start_Date_UTC.dt.date
# NOTE(review): the next three statements look like leftover exploration
# code — the bare subscript result is discarded and events_at_day is
# recomputed inside get_dataset(); confirm before removing.
index_hours_same_day = np.where(unique_days_list==unique_days_list.unique()[2])
index_hours_same_day[0][4]
events_at_day = LC_data.Start_Date_UTC[list(index_hours_same_day[0][:])]
#Create a dataframe to store unique_days_list (one '%Y-%m-%d' string per row)
columns_ud = ['Unique_Days']
New_data_days_unique = pd.DataFrame(unique_days_list.index,columns=columns_ud)
for i in New_data_days_unique.index:
    New_data_days_unique['Unique_Days'][i]= pd.Timestamp.strftime(unique_days_list[i],'%Y-%m-%d')
#List to be shown in the "select button" (deduplicated, sorted)
List_to_select_days = sorted(list(set(New_data_days_unique['Unique_Days'])))
#Colors table per activity
df_activity_colors = activities_color_table(LC_data.Name.unique())
# Initial state: plot a fixed day until the user picks one from the widget.
selected_day='2017-01-22'
source=get_dataset(LC_data,unique_days_list,selected_day,df_activity_colors)
plot = make_plot(source)
#Timestamp selection widget, wired to update_plot on change
select_day = Select(title="Day", value="foo", options=List_to_select_days)
select_day.on_change('value', update_plot)
controls = column(select_day)
curdoc().add_root(row(plot, controls))
curdoc().title = "Sunburst"
import tensorflow as tf
import numpy as np
from functools import partial
from keras.models import Model
from .. import layers
from ..base import K
from .module import conv2d, deconv2d, conv2dt, vae_sampling
def HGResiduleModule(inputs, out_channel, add_residule=True, kernel_initializer='glorot_uniform', **kwargs):
    """Hourglass-style bottleneck (1x1 -> 3x3 -> 1x1 convs) with an optional
    1x1-projected residual connection."""
    half = out_channel // 2
    bottleneck = [(half, 1), (half, 3), (out_channel, 1)]
    net = inputs
    for channels, kernel_size in bottleneck:
        net = conv2d(
            net, channels, kernel_size,
            strides=1,
            padding='same',
            kernel_initializer=kernel_initializer, **kwargs)
    if add_residule:
        # Project the input to out_channel with a 1x1 conv so shapes match.
        shortcut = conv2d(
            inputs, out_channel, 1,
            strides=1,
            padding='same',
            kernel_initializer=kernel_initializer, **kwargs)
        net = layers.Add()([net, shortcut])
    return net
def ResiduleModule(x, out_channels, ks=3, s=1, kernel_initializer='glorot_uniform', activation='relu', batch_norm='InstanceNormalization2D', **kwargs):
    """Pre-normalized residual block: BN -> conv(ks) -> conv(ks, stride s),
    added to a shortcut that is 1x1-projected whenever the channel count or
    spatial size changes."""
    in_channels = K.int_shape(x)[-1]
    shortcut = x
    # conv branch
    y = layers.BatchNormalization()(x)
    y = conv2d(y, out_channels, ks, strides=1, padding='same', activation=activation,
               batch_norm=batch_norm, kernel_initializer=kernel_initializer, **kwargs)
    y = conv2d(y, out_channels, ks, strides=s, padding='same', activation=None,
               batch_norm=batch_norm, kernel_initializer=kernel_initializer, **kwargs)
    # shortcut branch: project only when shapes would not match
    needs_projection = (in_channels != out_channels) or (s > 1)
    if needs_projection:
        shortcut = conv2d(x, out_channels, 1, strides=s, padding='same', activation=None,
                          batch_norm=None, kernel_initializer=kernel_initializer, **kwargs)
    return layers.Add()([y, shortcut])
def Encoding2D(inputs, out_channel, down_sample=True, module='Residule', kernel_initializer='glorot_uniform', **kwargs):
    """Apply one residual module, optionally followed by a stride-2 conv downsample.

    `module` may be a callable, or the string name of a *Module function in
    this file ('Residule' -> ResiduleModule, 'HGResidule' -> HGResiduleModule).
    """
    module_fn = module
    if type(module) is str:
        module_fn = globals()['%sModule' % module]
    net = module_fn(inputs, out_channel,
                    kernel_initializer=kernel_initializer,
                    **kwargs)
    if not down_sample:
        return net
    return conv2d(net, out_channel, 3, strides=2, padding='same',
                  kernel_initializer=kernel_initializer, **kwargs)
def Hourglass(inputs, output_shape, depth=4, nf=64, module='HGResidule', kernel_initializer='glorot_uniform', **kwargs):
    """Single-stack hourglass: downsample `depth` times, then upsample with
    skip concatenations and regress `output_shape[-1]` output channels.

    Parameters
    ----------
    inputs : Keras tensor
    output_shape : tuple
        Only the last element (output channel count) is used.
    depth : int
        Number of down/up sampling stages; channel growth is capped at nf * 2**3.
    nf : int
        Base channel count.
    module : str or callable
        Residual module forwarded to Encoding2D.

    Returns
    -------
    Prediction tensor.
    """
    net = inputs
    # Stem: 7x7 stride-2 conv + 2x2 max pool -> 1/4 input resolution.
    net = conv2d(
        net, nf, 7,
        strides=2,
        padding='same',
        batch_norm=None,
        activation='relu',
        dropout=None,
        # kwargs.get replaces the verbose `kwargs[k] if k in kwargs else False`.
        use_coordconv=kwargs.get('use_coordconv', False),
        kernel_initializer=kernel_initializer)
    net = layers.MaxPool2D(pool_size=2)(net)
    # Down sampling: remember each stage's input for the skip connections.
    skip_layers = []
    for s in range(depth):
        s = np.min([s, 3])  # cap channels at nf * 2**3
        skip_layers.append(net)
        net = Encoding2D(net, nf * 2 ** s, down_sample=True, activation='relu', kernel_initializer=kernel_initializer, module=module, **kwargs)
    # feature compression at the bottleneck (no further downsampling)
    net = Encoding2D(
        net,
        nf * 2 ** depth,
        down_sample=False,
        activation='relu',
        kernel_initializer=kernel_initializer,
        module=module,
        **kwargs
    )
    # up sampling: mirror the encoder and concatenate the stored skips.
    for s, s_layer in zip(range(depth), skip_layers[::-1]):
        s = (depth - s - 1)
        s = np.min([s, 3])
        net = deconv2d(
            net, nf * 2 ** s, 3,
            kernel_initializer=kernel_initializer, activation='relu', **kwargs)
        net = layers.Concatenate()([net, s_layer])
    # output regression head: upsample x4 back to input resolution.
    net = deconv2d(
        net,
        nf,
        kernel_size=3,
        size=4,
        padding='same',
        activation='relu',
        kernel_initializer=kernel_initializer, **kwargs)
    prediction = conv2d(
        net,
        output_shape[-1],
        kernel_size=3,
        padding='same',
        activation=None,
        kernel_initializer=kernel_initializer)
    return prediction
def Encoder2D(inputs, embedding, depth=2, nf=32, kernel_initializer='glorot_uniform', **kwargs):
    """Convolutional encoder: stacked HGResidule encodings -> dense embedding.

    Parameters
    ----------
    inputs : Keras tensor
    embedding : int
        Width of the final dense embedding.
    depth : int
        Number of encoding stages; channel growth is capped at nf * 2**4.
    nf : int
        Base channel count.
    """
    net = conv2d(
        inputs, nf, 3,
        activation='relu',
        kernel_initializer=kernel_initializer, **kwargs)
    for s in range(1, depth):
        s = np.min([s, 4])  # cap channels at nf * 2**4
        # kernel_initializer is forwarded explicitly: it used to be dropped,
        # so a custom initializer was silently ignored below the first layer.
        net = Encoding2D(net, nf * 2 ** s, module='HGResidule',
                         kernel_initializer=kernel_initializer, **kwargs)
    s = np.min([depth, 4])
    net = Encoding2D(net, nf * 2 ** s, module='HGResidule',
                     down_sample=False, kernel_initializer=kernel_initializer, **kwargs)
    net = layers.Flatten()(net)
    net = layers.Dense(embedding)(net)
    net = layers.ReLU()(net)
    return net
def Decoder2D(inputs, out_shape, depth=2, nf=32, kernel_initializer='glorot_uniform', **kwargs):
    """Convolutional decoder: dense layer -> reshape -> `depth` upsampling
    deconvs -> final conv to `out_shape[-1]` channels.

    Parameters
    ----------
    inputs : Keras tensor
        Flat embedding tensor.
    out_shape : tuple
        Target (H, W, C); H and W must be divisible by 2**depth.
    """
    # Bottleneck spatial size is out_shape / 2**depth; channels come from the
    # input tensor. `np.int` was removed in NumPy 1.24 — use builtin int.
    input_shape = np.array(list(np.array(
        out_shape[:-1]) / np.power([2, 2], depth)) + [inputs.shape.as_list()[-1]]).astype(int)
    net = layers.Dense(np.prod(input_shape))(inputs)
    net = layers.ReLU()(net)
    net = layers.Reshape(input_shape)(net)
    for s in range(depth):
        s = np.min([(depth - s - 1), 4])  # mirror the encoder's channel schedule
        net = deconv2d(
            net, nf * 2 ** s, 3,
            activation='relu',
            kernel_initializer=kernel_initializer, **kwargs)
    net = conv2d(
        net,
        out_shape[-1], (3, 3),
        strides=1,
        padding='same',
        activation=None,
        kernel_initializer=kernel_initializer, **kwargs)
    return net
def AutoEncoder(inputs, output_shape, depth=2, embedding=128, nf=32, kernel_initializer='glorot_uniform', **kwargs):
    """Plain auto-encoder: Encoder2D down to `embedding`, Decoder2D back to
    `output_shape`. Returns the reconstruction tensor."""
    # kernel_initializer is now forwarded (it used to be accepted but unused);
    # the local is renamed so the `embedding` size parameter is not shadowed.
    code = Encoder2D(inputs, embedding, depth=depth, nf=nf,
                     kernel_initializer=kernel_initializer, **kwargs)
    reconstruction = Decoder2D(code, output_shape, depth=depth, nf=nf,
                               kernel_initializer=kernel_initializer, **kwargs)
    return reconstruction
def UNet(inputs, output_shape, nf=64, ks=4, **kwargs):
    """Pix2pix-style U-Net generator for 256x256 images.

    Eight stride-2 encoder convs take the input down to 1x1; seven deconvs
    (dropout 0.5 on the first three) come back up, each concatenated with the
    matching encoder activation; a final tanh deconv emits 3 channels.
    """
    norm = 'InstanceNormalization2D'
    # --- Encoder: 256 -> 1 spatially, channels nf .. nf*8. ---
    encoder_channels = [nf, nf*2, nf*4, nf*8, nf*8, nf*8, nf*8, nf*8]
    skips = []
    net = inputs
    for channels in encoder_channels:
        net = conv2d(net, channels, ks, strides=2, padding='same',
                     activation=True, batch_norm=norm)
        skips.append(net)
    # --- Decoder: concatenate with e7..e1 (deepest skip is the input itself). ---
    decoder_specs = [
        (nf*8, 0.5), (nf*8, 0.5), (nf*8, 0.5),
        (nf*8, None), (nf*4, None), (nf*2, None), (nf, None),
    ]
    for (channels, drop), skip in zip(decoder_specs, reversed(skips[:-1])):
        if drop is None:
            net = deconv2d(net, channels, ks, padding='same',
                           activation='relu', batch_norm=norm)
        else:
            net = deconv2d(net, channels, ks, padding='same',
                           activation='relu', batch_norm=norm, dropout=drop)
        net = layers.Concatenate()([net, skip])
    # Final layer maps back to 3 channels at full resolution with tanh.
    outputs = deconv2d(net, 3, ks, padding='same', activation='tanh', batch_norm=None)
    return outputs
def Discriminator(inputs, nf=64, depth=4, ks=4, return_endpoints=False, **kwargs):
    """Patch-style discriminator: `depth` stride-2 convs with doubling
    channels, then a 1x1 conv producing a single-channel validity map.

    When return_endpoints is True, also return the last feature map.
    """
    net = inputs
    for i in range(depth):
        # No normalization on the very first layer.
        norm = 'InstanceNormalization2D' if i > 0 else None
        net = conv2d(
            net,
            nf * 2 ** i,
            ks,
            strides=2,
            padding='same',
            batch_norm=norm,
            activation=True,
            **kwargs
        )
    # 1x1 conv, no norm and no activation: raw validity scores.
    validity = conv2d(
        net,
        1,
        1,
        strides=1,
        padding='same',
        batch_norm=False,
        activation=False,
    )
    return (validity, net) if return_endpoints else validity
def ResNet50(inputs, output_shape, nf=64, n_residule=9, module='Residule', with_deconv=True, with_dense=False, embeding=256, n_classes=100, dropout=0.3, **kwargs):
    """CycleGAN-style ResNet: c7s1-nf, d(2nf), d(4nf), `n_residule` residual
    blocks, then either an upsampling image head (with_deconv) or a dense
    softmax classification head (with_dense)."""
    module_fn = module
    if type(module) is str:
        module_fn = globals()['%sModule' % module]
    norm = 'InstanceNormalization2D'
    # Downsampling stem.
    net = conv2d(inputs, nf, 7, strides=1, padding='same',
                 activation='relu', batch_norm=norm, **kwargs)
    net = conv2d(net, nf * 2, 3, strides=2, activation='relu',
                 batch_norm=norm, **kwargs)
    net = conv2d(net, nf * 4, 3, strides=2, activation='relu',
                 batch_norm=norm, **kwargs)
    # Residual trunk.
    for _ in range(n_residule):
        net = module_fn(net, nf*4, **kwargs)
    if with_deconv:
        # Image-to-image head: upsample back and map to output channels.
        net = deconv2d(net, nf*2, 3, size=2, activation='relu',
                       batch_norm=norm, **kwargs)
        net = deconv2d(net, nf, 3, size=2, activation='relu',
                       batch_norm=norm, **kwargs)
        net = conv2d(net, output_shape[-1], 7, strides=1,
                     padding='same', activation='tanh', batch_norm=None, **kwargs)
    elif with_dense:
        # Classification head.
        net = layers.Flatten()(net)
        net = layers.Dropout(dropout)(net)
        net = layers.Dense(embeding, activation='relu')(net)
        net = layers.Dense(n_classes, activation='softmax')(net)
    return net
def ArcFace(inputs, embeding, nf=64, n_classes=None, dropout=0.3, **kwargs):
    """Residual ArcFace backbone for 112x112 inputs.

    Returns the dense feature embedding; when `n_classes` is given, also
    returns the ArcDense-normalised logits as a second output.
    """
    net = layers.BatchNormalization()(inputs)
    # Stem: 112 x 112 x nf
    net = conv2d(net, nf, 3, strides=1, padding='same',
                 activation=True, **kwargs)
    # Residual stages: (channels, stride-1 repeats after the stride-2 block).
    # Spatial sizes shrink 112 -> 56 -> 28 -> 14 -> 7.
    stages = [(nf, 2), (nf*2, 12), (nf*4, 29), (nf*8, 2)]
    for channels, repeats in stages:
        net = ResiduleModule(net, channels, s=2, **kwargs)
        for _ in range(repeats):
            net = ResiduleModule(net, channels, s=1, **kwargs)
    # Head: BN -> dropout -> flatten -> dense embedding.
    net = layers.BatchNormalization()(net)
    net = layers.Dropout(dropout)(net)
    net = layers.Flatten()(net)
    feature = layers.Dense(embeding, activation=None, name='feature_embeddings')(net)
    if n_classes:
        logits = layers.ArcDense(n_classes)(feature)
        return feature, logits
    return feature
def VAE(inputs, nf=32, ks=3, depth=2, embedding=64, latent=16, return_models=False, *args, **kwargs):
    """Convolutional variational auto-encoder.

    Parameters
    ----------
    inputs : Keras tensor
        Input batch; its static shape fixes the reconstruction channels.
    nf : int
        Base channel count, doubled at each downsampling step.
    ks : int
        Convolution kernel size.
    depth : int
        Number of stride-2 conv / upsample-by-2 deconv stages.
    embedding : int
        Width of the dense layer between the conv stack and the latent code.
    latent : int
        Dimensionality of the latent vector z.
    return_models : bool
        When True, also return the [encoder, decoder] sub-models.

    Returns
    -------
    Reconstruction tensor, or (reconstruction, [encoder, decoder]).
    """
    # VAE model = encoder + decoder
    # build encoder model
    output_shape = K.int_shape(inputs)
    x = inputs
    for i in range(depth):
        x = conv2d(x, nf * (2**i), ks, strides=2,
                   activation='relu', *args, **kwargs)
    # shape info needed to build decoder model
    embedding_shape = K.int_shape(x)
    # generate latent vector Q(z|X)
    x = layers.Flatten()(x)
    x = layers.Dense(embedding, activation='relu')(x)
    z_mean = layers.Dense(latent, name='z_mean')(x)
    z_log_var = layers.Dense(latent, name='z_log_var')(x)
    # use reparameterization trick to push the sampling out as input
    # note that "output_shape" isn't necessary with the TensorFlow backend
    z = layers.Lambda(vae_sampling, output_shape=(
        latent,), name='z')([z_mean, z_log_var])
    # instantiate encoder model
    encoder = Model(
        inputs,
        [z_mean, z_log_var, z],
        name='encoder')
    # build decoder model: dense -> reshape back to the bottleneck shape
    latent_inputs = layers.Input(shape=(latent,), name='z_sampling')
    x = layers.Dense(embedding_shape[1] * embedding_shape[2]
                     * embedding_shape[3], activation='relu')(latent_inputs)
    x = layers.Reshape(
        (embedding_shape[1], embedding_shape[2], embedding_shape[3]))(x)
    # mirror the encoder's channel schedule while upsampling
    for i in range(depth-1, -1, -1):
        x = deconv2d(x, nf*2**i, ks, strides=1, size=2,
                     activation='relu', *args, **kwargs)
    recon = conv2d(x, output_shape[-1], ks,
                   strides=1, activation=None, *args, **kwargs)
    # instantiate decoder model
    decoder = Model(latent_inputs, recon, name='decoder')
    # instantiate VAE model: decode the sampled z (last encoder output)
    outputs = decoder(encoder(inputs)[-1])
    if return_models:
        return outputs, [encoder, decoder]
    return outputs
|
"""GeoNet NZ Volcanic Alert Level feed."""
import logging
from datetime import datetime
from typing import Optional
import pytz
from aio_geojson_client.feed import GeoJsonFeed
from aiohttp import ClientSession
from aio_geojson_geonetnz_volcano.consts import URL
from .feed_entry import GeonetnzVolcanoFeedEntry
_LOGGER = logging.getLogger(__name__)
class GeonetnzVolcanoFeed(GeoJsonFeed):
    """GeoNet NZ Volcanic Alert Level feed."""

    def __init__(self, websession: ClientSession, home_coordinates, filter_radius=None):
        """Initialise this service."""
        super().__init__(websession, home_coordinates, URL, filter_radius=filter_radius)

    def __repr__(self):
        """Return string representation of this feed."""
        return "<{}(home={}, url={}, radius={})>".format(
            self.__class__.__name__,
            self._home_coordinates,
            self._url,
            self._filter_radius,
        )

    def _new_entry(self, home_coordinates, feature, global_data):
        """Generate a new entry for a single GeoJSON feature.

        global_data is ignored: this feed carries no global metadata.
        """
        return GeonetnzVolcanoFeedEntry(home_coordinates, feature)

    def _now(self):
        """Return now as a timezone-aware UTC datetime."""
        return datetime.now(pytz.utc)

    def _extract_last_timestamp(self, feed_entries) -> Optional[datetime]:
        """This feed does not provide a timestamp."""
        return None

    # Annotated Optional[dict]; the previous bare `Optional` (without a type
    # argument) is not a valid annotation.
    def _extract_from_feed(self, feed) -> Optional[dict]:
        """Extract global metadata from feed (none for this feed)."""
        return None
|
from enum import Enum
class SharingType(Enum):
    """Closed set of sharing modes for an object."""
    UNLIMITED = "UNLIMITED"
    PERMISSIONS_LIMITED = "PERMISSIONS_LIMITED"
    DISABLED = "DISABLED"
class ObjectType(Enum):
    """Kinds of objects handled by this module."""
    ARTIFACT = "ARTIFACT"
    FILE = "FILE"
    FOLDER = "FOLDER"
class FolderType(Enum):
    """Closed set of folder categories."""
    STANDARD = "STANDARD"
    SHARES = "SHARES"
    PERSONAL = "PERSONAL"
class ErrorCode(Enum):
    """Closed set of error code identifiers."""
    ARTIFACT_NOT_FOUND = "ARTIFACT_NOT_FOUND"
    FILE_GONE = "FILE_GONE"
    FILE_NOT_FOUND = "FILE_NOT_FOUND"
    FOLDER_GONE = "FOLDER_GONE"
    FOLDER_NOT_FOUND = "FOLDER_NOT_FOUND"
    FORBIDDEN = "FORBIDDEN"
    UNPROCESSABLE_ENTITY = "UNPROCESSABLE_ENTITY"
|
from django.db import models
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.functional import cached_property
from .choices import (
AB_CHOICES,
APPLICATION_TYPE_CHOICES,
APP_SUBMISSION_TYPE_CHOICES,
BOOL_CHOICES,
CFDA_CHOICES,
FED_ID_REGION_CHOICES,
FED_ID_TYPE_CHOICES,
NOTE_TYPE_CHOICES,
PROGRAM_RESPONSIBILITY_TYPE_CHOICES,
RESEARCH_TYPE_CHOICES,
RWU_CHOICES,
SCIENCE_CODE_CHOICES,
STATUS_CHOICES,
WPAP_STATUS_CHOICES,
YEAR_CHOICES,
)
from contacts.models import AccomplishmentInstrument
class Grant(models.Model):
    """
    Defines a grant and its status through the full workflow.
    Has a lot of related info in here, too.
    """

    # CN may need to be viewable at some times, per the User Guide.
    # Unclear what the use case might be, though.
    # Per Jamie, it is a random number, but the first part is based on FY generated and org...
    # so 2109**** would be from region 09
    cn = models.CharField(
        "Proposal ID",
        max_length=34,
        primary_key=True,
        editable=False,
        db_index=True,
        help_text="A system-generated number used to identify the proposal before an agreement number is assigned or the instrument is executed.",
    )
    proj_title = models.CharField(
        "Project title",
        max_length=200,
        db_index=True,
        help_text="The short and concise name of the project. Do not include any acronyms, unit codes, or funding codes.",
    )
    proj_status = models.CharField(
        "Project status",
        max_length=15,
        blank=True,
        null=True,
        choices=AB_CHOICES,
        help_text="The Status of the grant or agreement (instrument).",
    )
    # Sys-generated, once initial proposal saved. Control number.
    # Older field, not user viewable. Only used for historical records.
    application_id = models.CharField(max_length=34, editable=False)
    application_type = models.CharField(
        "Type of application",
        max_length=30,
        choices=APPLICATION_TYPE_CHOICES,
        default="Other",
        help_text="""
        Applies to instruments with Federal Financial Assistance (FFA).
        Select OTHER if not FFA.
        """,
    )
    app_submission_type = models.CharField(
        "Type of Submission",
        max_length=100,
        choices=APP_SUBMISSION_TYPE_CHOICES,
        default="Other",
        help_text="""
        Applies to instruments with Federal Financial Assistance (FFA).
        Select OTHER if not FFA.
        """,
    )
    app_submit_date = models.DateField(
        "Application submitted",
        help_text="The date the proposal was submitted for review or processing.",
    )
    app_received_date = models.DateField(
        "Application received",
        help_text="The date the proposal was received by the Forest Service.",
    )
    # No longer utilized, not in the application.
    hhs_payment_ind = models.CharField(
        max_length=1,
        default="N",
        editable=False,
        help_text="Was only used by NRS (Northern Research Station) and NA Northeastern Area (S&PF).",
    )
    proposed_start_date = models.DateField(
        help_text="""
        The date the project is expected to start as negotiated in the agreement.
        This date cannot be after the Expiration date.
        """
    )
    proposed_end_date = models.DateField(
        help_text="""The date the project is expected to end as negotiated in the agreement."""
    )
    # locked_ind was never implemented in the front end.
    # The desire is that sys admins and help desk should be able to lock a record
    # so it cannot be edited (unless they unlock it).
    # For now, marking it as not-editable.
    # TO-DO: Implement this
    locked_ind = models.CharField(
        choices=BOOL_CHOICES, max_length=1, default="N", editable=False
    )
    status = models.CharField(
        "Agreement Status",
        max_length=40,
        choices=STATUS_CHOICES,
        default="NEW-APPLICATION",
        editable=False,
    )
    status_date = models.DateField(auto_now_add=True)
    # TO-DO: created_by should record current user on save()
    created_by = models.CharField(max_length=30, editable=False)  # FK?
    created_date = models.DateField(auto_now_add=True)
    # Sys generated field. Comes from RACA, says what instance of the application it is.
    # TO-DO: determine how to generate it.
    created_in_instance = models.DecimalField(
        max_digits=6, decimal_places=0, editable=False
    )
    # There may be a subset of fields that should trigger this but it will be the current
    # user that did the modification
    modified_by = models.CharField(max_length=30, blank=True, null=True)  # FK?
    modified_date = models.DateField(blank=True, null=True, auto_now=True)
    # Same as created_in_instance, we need to determine how to generate this.
    modified_in_instance = models.DecimalField(
        max_digits=6, decimal_places=0, blank=True, null=True, editable=False
    )
    # These are the fields agreement numbers are derived from.
    # Few people interact with them directly though.
    fed_id_fy = models.CharField(
        max_length=4, blank=True, null=True, choices=YEAR_CHOICES
    )
    fed_id_type = models.CharField(
        max_length=2, blank=True, null=True, choices=FED_ID_TYPE_CHOICES
    )
    fed_id_agency = models.CharField(
        "Agency",
        max_length=2,
        blank=True,
        null=True,
        help_text="""
        The two-digit code the agency.
        For the Forest Service, the agency is always "11".
        """,
        default="11",
    )
    fed_id_region = models.CharField(
        max_length=2,
        blank=True,
        null=True,
        choices=FED_ID_REGION_CHOICES,
        help_text="The two-digit code for the region of the instrument, such as 01 or 13.",
    )
    fed_id_unit = models.CharField(
        max_length=2,
        blank=True,
        null=True,
        help_text="The two-digit code for the unit.",
    )
    fed_id_subunit = models.CharField(
        max_length=2,
        blank=True,
        null=True,
        help_text="The two-digit code for the subunit.",
    )
    fed_id_seq = models.DecimalField(
        max_digits=3,
        decimal_places=0,
        blank=True,
        null=True,
        help_text="The three-digit code for the sequence number of the instrument.",
    )
    # Fields describing project(s) and significant dates.
    proj_type = models.CharField(
        "Project Type", max_length=3, blank=True, null=True, choices=AB_CHOICES
    )
    proj_desc = models.TextField(
        "Project description",
        max_length=2000,
        blank=True,
        null=True,
        help_text="Narrative text describing the project.",
    )
    proj_received_dt = models.DateField(
        "Project received date",
        blank=True,
        null=True,
        help_text="The date the State received the proposal for consideration.",
    )
    proj_execution_dt = models.DateField(
        "Project execution date", blank=True, null=True
    )
    proj_start_dt = models.DateField(
        "Start date", blank=True, null=True, help_text=""""""
    )
    proj_obligation_dt = models.DateField(
        "Project obligation date", blank=True, null=True
    )
    proj_expiration_dt = models.DateField("Expires", blank=True, null=True)
    proj_close_dt = models.DateField(
        "Close date",
        blank=True,
        null=True,
        help_text="""
        The date the instrument is closed.
        Denotes the grant or agreement has been executed and has been officially closed.
        Required when closing the instrument.
        """,
    )
    proj_cancellation_dt = models.DateField(
        "Cancellation date",
        blank=True,
        null=True,
        help_text="The date the instrument is canceled.",
    )
    proj_rwu = models.CharField(
        "Research Work Unit",
        max_length=10,
        blank=True,
        null=True,
        choices=RWU_CHOICES,
    )
    proj_cfda_no = models.CharField(
        "CFDA Number",
        max_length=40,
        blank=True,
        null=True,
        choices=CFDA_CHOICES,
        help_text="""
        The Catalog of Federal Domestic Assistance (CFDA) Number.
        Required if the instrument was issued under a federal financial assistance authority
        (instrument type DG and CA only). REQUIRED FOR APPROVAL""",
    )
    proj_science_cd = models.CharField(
        "Science Code",
        max_length=3,
        blank=True,
        null=True,
        choices=SCIENCE_CODE_CHOICES,
    )
    project_congressional_district = models.CharField(
        max_length=40, blank=True, null=True
    )
    date_mailed = models.DateField(blank=True, null=True)
    date_signed = models.DateField(blank=True, null=True)
    comments = models.TextField(max_length=2000, blank=True, null=True)
    # Per user guide extramural_ind should be represented as a boolean
    # TO-DO: present it as a boolean (may require migration)
    extramural_ind = models.CharField(
        "Extramural",
        choices=BOOL_CHOICES,
        max_length=1,
        null=True,
        default="N",
        help_text="For reporting FS Research agreement activities only.",
    )  # should be a boolean, but appears to be null in some DB instances
    research_type = models.CharField(
        max_length=1, blank=True, null=True, choices=RESEARCH_TYPE_CHOICES
    )
    journal_ind = models.CharField("Journal", max_length=1, blank=True, null=True)
    mod_number = models.DecimalField(
        max_digits=3, decimal_places=0, blank=True, null=True
    )
    orig_fed_id = models.CharField(
        "Original Agreement Number",
        max_length=120,
        blank=True,
        null=True,
        help_text="""
        If there was a previous agreement, (prior to FY2000 numbering scheme),
        enter the original Forest Service identification number here to supply
        a cross-reference to the new agreement number.
        Note: Do not enter other agency agreement numbers in this field.
        They can be entered in the Comments field.
        """,
    )
    master_fed_id = models.CharField(
        "Master Fed ID", max_length=120, blank=True, null=True
    )
    aop_ind = models.CharField(
        "AOP",
        choices=BOOL_CHOICES,
        max_length=1,
        null=True,
        default="N",
        help_text="""
        Select Yes from the choice list if the instrument requires an Annual Operating Plan (AOP).
        """,
    )
    geo_type = models.CharField(max_length=2, blank=True, null=True)
    managing_state_county = models.CharField(max_length=240, blank=True, null=True)
    areas_effected = models.CharField(
        "Areas Affected",
        max_length=200,
        blank=True,
        null=True,
        help_text="""
        Areas (states, localities, congressional districts, and other areas) that are affected by the project.
        """,
    )
    ffis_doc_id = models.CharField("FFIS Doc", max_length=11, blank=True, null=True)
    ffin = models.CharField(
        "FMMI Agreement Number",
        max_length=40,
        blank=True,
        null=True,
        help_text="The agreement number assigned to the instrument in FMMI.",
    )
    # State fields
    state_identifier = models.CharField(
        max_length=40, blank=True, null=True
    )  # choices? FK?
    state_eo_code = models.CharField(
        "Subject to State E.O.",
        max_length=1,
        blank=True,
        null=True,
        choices=BOOL_CHOICES,
        default="N",
        help_text="Is this instrument subject to Executive Order 12372 review?",
    )
    # We validate on save that a date was entered if state_eo_code is Y
    state_eo_date = models.DateField(
        "EO Date",
        blank=True,
        null=True,
        help_text="""
        Executive Order 12372 review date.
        You must select a date if 'Subject to State E.O.' is Yes.""",
    )
    # EST Fund fields
    fed_est_fund = models.DecimalField(
        max_digits=12, decimal_places=2, blank=True, null=True
    )
    applicant_est_fund = models.DecimalField(
        "Total Amount of Funds Requested",
        help_text="""The total amount of funds being requested for this proposal.""",
        max_digits=12,
        decimal_places=2,
        blank=True,
        null=True,
    )
    state_est_fund = models.DecimalField(
        max_digits=12, decimal_places=2, blank=True, null=True
    )
    local_est_fund = models.DecimalField(
        max_digits=12, decimal_places=2, blank=True, null=True
    )
    pi_est_fund = models.DecimalField(
        max_digits=12, decimal_places=2, blank=True, null=True
    )
    oth_est_fund = models.DecimalField(
        max_digits=12, decimal_places=2, blank=True, null=True
    )
    reroute_from = models.CharField(max_length=10, blank=True, null=True)
    reroute_date = models.DateField(blank=True, null=True)
    # NOTE: db_column deliberately preserves the misspelled legacy column name.
    certification_date = models.DateField(
        "Certification date", db_column="certificaion_date", blank=True, null=True
    )
    applicant_name = models.CharField(
        "Applicant/Cooperator Name", max_length=200, blank=True, null=True
    )
    international_act_ind = models.CharField(
        "International Activities",
        choices=BOOL_CHOICES,
        max_length=1,
        null=True,
        default="N",
        help_text="Indicates if the project has any associated international activities.",
    )
    advance_allowed_ind = models.CharField(
        "Advance Allowed",
        choices=BOOL_CHOICES,
        max_length=1,
        null=True,
        default="N",
        help_text="Indicates if advanced payments are allowed in the instrument. Select Y if advanced payments are allowed.",
    )
    authority_approval = models.CharField(
        max_length=1, blank=True, null=True, choices=BOOL_CHOICES, default="N"
    )
    authority = models.CharField(
        max_length=1, blank=True, null=True, choices=BOOL_CHOICES, default="N"
    )
    format = models.CharField(
        max_length=1, blank=True, null=True, choices=BOOL_CHOICES, default="N"
    )  # we may need to safely rename this column
    other_approval = models.CharField(
        max_length=1, blank=True, null=True, choices=BOOL_CHOICES, default="N"
    )
    # Per User Guide, this field is not available until the instrument's status is PROP-ACCEPTED
    # TO-DO: Add runtime check for self.status and update form to make this editable if needed.
    master_agreement_ind = models.CharField(
        max_length=1,
        blank=True,
        null=True,
        editable=False,
        choices=BOOL_CHOICES,
        default="N",
        help_text="Indicates if the instrument is a Master Agreement.",
    )
    progrm_responsibility_type = models.CharField(
        "Program Responsibility Type",
        max_length=30,
        choices=PROGRAM_RESPONSIBILITY_TYPE_CHOICES,
        help_text="""
        Indicates the specific responsibilities notification type
        (Incoming Funding, Outgoing Funding, or Non-Cash ie no exchange of funding)
        that is send to the Program Manager when the agreement is executed.
        """,
        null=True,
    )
    wppp_status = models.CharField(
        "WPAP Status",
        max_length=40,
        blank=True,
        null=True,
        choices=WPAP_STATUS_CHOICES,
        editable=False,
    )
    wppp_status_date = models.DateField(
        "WPAP status date", blank=True, null=True, editable=False
    )
    cooperator_agreement_number = models.CharField(
        max_length=34,
        blank=True,
        null=True,
        help_text="Cooperator's agreement number, if different than the Forest Service Agreement Number",
    )  # Is this used to key to a cooperator agreement?
    gid = models.CharField("Agreement Number", max_length=16, blank=True, null=True)
    admin_open = models.CharField(max_length=1, blank=True, null=True)
    last_update = models.DateField(auto_now=True)

    class Meta:
        db_table = "ii_grants"
        verbose_name = "Grant/Agreement"
        verbose_name_plural = "Grants and Agreements"
        ordering = ["-created_date"]
        constraints = [models.UniqueConstraint(fields=["gid"], name="unique_gid")]

    def __str__(self):
        """Use the project title as the display name."""
        return self.proj_title

    def get_absolute_url(self):
        """Return the canonical detail-page URL for this grant."""
        reversed_url = reverse(
            "grant_details", kwargs={"app_label": "grants", "pk": self.cn}
        )
        return reversed_url

    def pretty_name(self):
        """
        Returns a pretty title-case version of the project title
        for use in the admin.
        """
        return "%s" % self.proj_title.title()

    def resolved_id(self):
        """
        Returns an agreement ID if one exists already, or some helpful text if not,
        for a more user-friendly admin.
        """
        if self.gid:
            return self.gid
        return "TBD"

    pretty_name.short_description = "Project Title"

    def pretty_cooperator_name(self):
        """
        Prettifies the applicant/cooperator name.
        If the name is all uppercase, it transforms it to title case.
        If the name is not uppercase, it assumes the name was input as intended and does nothing.
        If the name does not exist, it returns an empty string (no change).
        """
        if self.applicant_name:
            if self.applicant_name.isupper():
                return "%s" % self.applicant_name.title()
            return self.applicant_name
        return ""

    pretty_cooperator_name.short_description = "Cooperator name"

    def significant_dates(self):
        """
        Convenience method to display multiple dates in a single admin changelist column.
        """
        # Missing dates render as empty strings rather than "None".
        datelist_string = """<ul>
        <li>Start date: %s</li>
        <li>Expires: %s</li>
        </ul>""" % (
            self.proj_start_dt or "",
            self.proj_expiration_dt or "",
        )
        return mark_safe(datelist_string)

    significant_dates.allow_tags = True

    @cached_property
    def contacts(self):
        """
        Follow the contact links and return a list of all the associated contacts.
        """
        try:
            instrument = self.accomplishmentinstrument
        except AccomplishmentInstrument.DoesNotExist:
            return []
        links = instrument.accinstcontlink_set.all()
        return [
            {
                "type": link.link_type_name,
                "sub_type": link.link_sub_type,
                "contact": link.contact,
            }
            for link in links
        ]

    @cached_property
    def org(self):
        """
        Shorthand accessor for the Organization code and name.
        Note that because of the lack of database integrity we can't trust it to be a true 1:1 relationship.
        Until that is cleared up we're going to have to use filter() because get() returns multiple and throws an error.
        We'll then grab the first from the set, believing the remainder are duplicates anyway.
        This is not ideal.
        """
        instrument = AccomplishmentInstrument.objects.filter(grant=self).first()
        if not instrument:
            return None
        return instrument.managing_contact
class GrantAuthority(models.Model):
    """
    Defines the authority a given Grant can use.
    This is one of the few models with a Django-generated PK.
    18F has a copy of possible authority_cd values at
    https://docs.google.com/document/d/1ngFEuIN5rtF-zx3WiIxnp31KWTtBmOMV/edit#heading=h.2kicxvv
    """

    grant = models.ForeignKey(
        Grant, on_delete=models.DO_NOTHING, db_column="grant_cn", editable=False
    )
    authority_cd = models.CharField(
        "Code",
        max_length=40,
        help_text="""
        The legislative authority code under which the instrument was authorized.
        The full list is in the GA User Guide""",
    )
    authority_desc = models.CharField(
        "Authority Description", max_length=120, blank=True, null=True
    )
    last_update = models.DateField(auto_now=True)

    class Meta:
        db_table = "ii_ga_authorities"
        verbose_name_plural = "Grant authorities"

    def __str__(self):
        return "%s: %s" % (self.authority_cd, self.grant)

    def pretty_str(self):
        """
        Title-case display string for the admin.

        Bug fix: the original called self.grant.title(), but Grant model
        instances have no title() method, so this always raised
        AttributeError. Title-case the grant's string representation
        (its project title) instead, mirroring Grant.pretty_name().
        """
        return "%s: %s" % (self.authority_cd, str(self.grant).title())

    pretty_str.short_description = "Grant Authority"
class Note(models.Model):
    """
    Defines a note object, which describes a grant.
    """

    cn = models.CharField(
        primary_key=True, max_length=34, db_index=True, editable=False
    )
    grant = models.ForeignKey(Grant, on_delete=models.DO_NOTHING, db_column="grant_cn")
    comments = models.CharField(
        max_length=2000, help_text="Any additional comments regarding the project."
    )
    note_type = models.CharField(
        max_length=50, blank=True, null=True, choices=NOTE_TYPE_CHOICES
    )
    # we need to learn the difference between these two fields
    # and the _by fields need to update on save()
    note_by = models.CharField(
        max_length=30,
        editable=False,
        help_text="The user ID of the person who created the note.",
    )
    created_by = models.CharField(
        max_length=30,
        editable=False,
    )
    note_date = models.DateField(auto_now_add=True)
    created_date = models.DateField(auto_now_add=True)
    # Needs to populate on save.
    modified_by = models.CharField(max_length=30, blank=True, null=True, editable=False)
    modified_date = models.DateField(blank=True, null=True, auto_now=True)
    last_update = models.DateField(auto_now=True)
    created_in_instance = models.DecimalField(
        max_digits=6, decimal_places=0, editable=False
    )
    modified_in_instance = models.DecimalField(
        max_digits=6, decimal_places=0, blank=True, null=True, editable=False
    )
    # we need to learn more about how exactly these email fields are used,
    # but per the User Guide they should not be user-editable.
    email_to = models.CharField(
        max_length=500,
        blank=True,
        null=True,
        editable=False,
        help_text="The e-mail address(es) of the note recipient (if the message was sent to via e-mail).",
    )
    # The user guide indicates that email_date should be updated automatically after an email action takes place. Maybe.
    email_date = models.DateField(
        blank=True,
        null=True,
        editable=False,
        help_text="The date the note was sent to the e-mail recipient(s).",
    )

    def __str__(self):
        """Short preview of the comment plus the owning grant."""
        return " - ".join([self.comments[:80], str(self.grant)])

    class Meta:
        db_table = "ii_ga_notes"
# TO-DO: custom save?
# * Create CN
class Category(models.Model):
    """Category code/description attached to a Grant."""

    cn = models.CharField(primary_key=True, max_length=34)
    grant = models.ForeignKey(Grant, models.DO_NOTHING, db_column="grant_cn")
    category_cd = models.CharField("Code", max_length=2)
    category_desc = models.CharField(
        "Description", max_length=120, blank=True, null=True
    )
    last_update = models.DateField(auto_now=True)

    class Meta:
        # managed = True
        db_table = "ii_ga_ip_categories"

    def __str__(self):
        """Display the human-readable description."""
        return self.category_desc
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# nnutil2 - Tensorflow utilities for training neural networks
# Copyright (c) 2019, Abdó Roig-Maranges <abdo.roig@gmail.com>
#
# This file is part of 'nnutil2'.
#
# This file may be modified and distributed under the terms of the 3-clause BSD
# license. See the LICENSE file for details.
import tensorflow as tf
class AttachLabel(tf.data.Dataset):
    """Dataset wrapper that turns each feature dict into a (feature, label) pair.

    The label is read from ``feature[label_feature]``; string labels are
    mapped to integer indices via ``labels``, optionally one-hot encoded.
    """

    def __init__(self, dataset, label_feature='label', onehot=False, labels=None):
        # Ordered collection of label names used to map string labels to
        # integer indices (and to size the one-hot vector).
        self._labels = labels
        # When True, expand the integer label to a one-hot vector.
        self._onehot = onehot
        # Key under which the label is stored in each feature dict.
        self._label_feature = label_feature
        self._input_datasets = [dataset]
        dataset = dataset.map(self.attach_label)
        self._dataset = dataset
        # NOTE(review): relies on the private ``_variant_tensor`` attribute
        # of tf.data.Dataset — an internal API that may change between
        # TensorFlow releases.
        super().__init__(self._dataset._variant_tensor)

    def attach_label(self, feature):
        """Map one feature dict to a ``(feature, label)`` tuple."""
        label = feature[self._label_feature]
        # String labels are converted to their integer index in
        # ``self._labels`` (unknown labels become -1, see label_index_fn).
        if self._labels is not None and label.dtype in set([tf.string]):
            label = tf.py_function(self.label_index_fn, [label], tf.int32)
            label = tf.reshape(label, shape=())
        if self._onehot:
            # Assumes ``labels`` was supplied whenever onehot=True;
            # otherwise len(None) raises — TODO confirm callers guarantee this.
            label = tf.one_hot(label, len(self._labels))
        return (feature, label)

    def label_index_fn(self, label):
        """Return the index of a scalar string tensor in ``self._labels``.

        Unknown labels map to -1.
        """
        try:
            idx = self._labels.index(label.numpy().decode())
        except ValueError:
            idx = -1
        return idx

    def _inputs(self):
        # Hook used by tf.data to track upstream input datasets.
        return list(self._input_datasets)

    @property
    def element_spec(self):
        # Structure of elements after attach_label has been mapped over them.
        return self._dataset.element_spec
|
import logging
import unittest
import time
from datetime import datetime
import string
import random
from kafka import * # noqa
from kafka.common import * # noqa
from kafka.codec import has_gzip, has_snappy
from kafka.consumer import MAX_FETCH_BUFFER_SIZE_BYTES
from .fixtures import ZookeeperFixture, KafkaFixture
def random_string(l):
    """Return a random string of ``l`` ASCII letters.

    Uses ``string.ascii_letters`` rather than the Python-2-only (and
    locale-dependent) ``string.letters``, and ``range`` rather than
    ``xrange``, so the helper works on both Python 2 and Python 3.

    :param l: number of characters to generate.
    :return: random string of length ``l``.
    """
    return "".join(random.choice(string.ascii_letters) for _ in range(l))
def ensure_topic_creation(client, topic_name):
    """Block until ``topic_name`` appears in the cluster metadata.

    Polls the client's metadata once per second, raising if the topic has
    not appeared after 30 attempts.

    :param client: Kafka client exposing ``load_metadata_for_topics()`` and
        ``has_metadata_for_topic()``.
    :param topic_name: name of the topic to wait for.
    :raises Exception: if the topic is still missing after ~30 seconds.
    """
    times = 0
    while True:
        times += 1
        client.load_metadata_for_topics(topic_name)
        if client.has_metadata_for_topic(topic_name):
            break
        # print() call form works on both Python 2 and 3; the original used
        # a Python-2-only print statement (a SyntaxError under Python 3).
        print("Waiting for %s topic to be created" % topic_name)
        time.sleep(1)
        if times > 30:
            raise Exception("Unable to create topic %s" % topic_name)
class KafkaTestCase(unittest.TestCase):
    """Base case that provisions a uniquely named topic before each test."""

    def setUp(self):
        # Build the topic name from the running test method plus a random
        # suffix so repeated runs never collide.
        test_id = self.id()
        method_name = test_id[test_id.rindex(".") + 1:]
        self.topic = "%s-%s" % (method_name, random_string(10))
        ensure_topic_creation(self.client, self.topic)
class TestKafkaClient(KafkaTestCase):
    @classmethod
    def setUpClass(cls):  # noqa
        """Start a Zookeeper/Kafka fixture pair and a shared client."""
        cls.zk = ZookeeperFixture.instance()
        cls.server = KafkaFixture.instance(0, cls.zk.host, cls.zk.port)
        cls.client = KafkaClient('%s:%d' % (cls.server.host, cls.server.port))
    @classmethod
    def tearDownClass(cls):  # noqa
        """Close the client and shut down the fixtures from setUpClass."""
        cls.client.close()
        cls.server.close()
        cls.zk.close()
#####################
# Produce Tests #
#####################
def test_produce_many_simple(self):
produce = ProduceRequest(self.topic, 0, messages=[
create_message("Test message %d" % i) for i in range(100)
])
for resp in self.client.send_produce_request([produce]):
self.assertEquals(resp.error, 0)
self.assertEquals(resp.offset, 0)
(offset, ) = self.client.send_offset_request([OffsetRequest(self.topic, 0, -1, 1)])
self.assertEquals(offset.offsets[0], 100)
for resp in self.client.send_produce_request([produce]):
self.assertEquals(resp.error, 0)
self.assertEquals(resp.offset, 100)
(offset, ) = self.client.send_offset_request([OffsetRequest(self.topic, 0, -1, 1)])
self.assertEquals(offset.offsets[0], 200)
for resp in self.client.send_produce_request([produce]):
self.assertEquals(resp.error, 0)
self.assertEquals(resp.offset, 200)
(offset, ) = self.client.send_offset_request([OffsetRequest(self.topic, 0, -1, 1)])
self.assertEquals(offset.offsets[0], 300)
def test_produce_10k_simple(self):
produce = ProduceRequest(self.topic, 0, messages=[
create_message("Test message %d" % i) for i in range(10000)
])
for resp in self.client.send_produce_request([produce]):
self.assertEquals(resp.error, 0)
self.assertEquals(resp.offset, 0)
(offset, ) = self.client.send_offset_request([OffsetRequest(self.topic, 0, -1, 1)])
self.assertEquals(offset.offsets[0], 10000)
def test_produce_many_gzip(self):
if not has_gzip():
return
message1 = create_gzip_message(["Gzipped 1 %d" % i for i in range(100)])
message2 = create_gzip_message(["Gzipped 2 %d" % i for i in range(100)])
produce = ProduceRequest(self.topic, 0, messages=[message1, message2])
for resp in self.client.send_produce_request([produce]):
self.assertEquals(resp.error, 0)
self.assertEquals(resp.offset, 0)
(offset, ) = self.client.send_offset_request([OffsetRequest(self.topic, 0, -1, 1)])
self.assertEquals(offset.offsets[0], 200)
def test_produce_many_snappy(self):
if not has_snappy():
return
message1 = create_snappy_message(["Snappy 1 %d" % i for i in range(100)])
message2 = create_snappy_message(["Snappy 2 %d" % i for i in range(100)])
produce = ProduceRequest(self.topic, 0, messages=[message1, message2])
for resp in self.client.send_produce_request([produce]):
self.assertEquals(resp.error, 0)
self.assertEquals(resp.offset, 0)
(offset, ) = self.client.send_offset_request([OffsetRequest(self.topic, 0, -1, 1)])
self.assertEquals(offset.offsets[0], 200)
def test_produce_mixed(self):
if not has_gzip() or not has_snappy():
return
message1 = create_message("Just a plain message")
message2 = create_gzip_message(["Gzipped %d" % i for i in range(100)])
message3 = create_snappy_message(["Snappy %d" % i for i in range(100)])
produce = ProduceRequest(self.topic, 0, messages=[message1, message2, message3])
for resp in self.client.send_produce_request([produce]):
self.assertEquals(resp.error, 0)
self.assertEquals(resp.offset, 0)
(offset, ) = self.client.send_offset_request([OffsetRequest(self.topic, 0, -1, 1)])
self.assertEquals(offset.offsets[0], 201)
def test_produce_100k_gzipped(self):
req1 = ProduceRequest(self.topic, 0, messages=[
create_gzip_message(["Gzipped batch 1, message %d" % i for i in range(50000)])
])
for resp in self.client.send_produce_request([req1]):
self.assertEquals(resp.error, 0)
self.assertEquals(resp.offset, 0)
(offset, ) = self.client.send_offset_request([OffsetRequest(self.topic, 0, -1, 1)])
self.assertEquals(offset.offsets[0], 50000)
req2 = ProduceRequest(self.topic, 0, messages=[
create_gzip_message(["Gzipped batch 2, message %d" % i for i in range(50000)])
])
for resp in self.client.send_produce_request([req2]):
self.assertEquals(resp.error, 0)
self.assertEquals(resp.offset, 50000)
(offset, ) = self.client.send_offset_request([OffsetRequest(self.topic, 0, -1, 1)])
self.assertEquals(offset.offsets[0], 100000)
#####################
# Consume Tests #
#####################
def test_consume_none(self):
fetch = FetchRequest(self.topic, 0, 0, 1024)
fetch_resp = self.client.send_fetch_request([fetch])[0]
self.assertEquals(fetch_resp.error, 0)
self.assertEquals(fetch_resp.topic, self.topic)
self.assertEquals(fetch_resp.partition, 0)
messages = list(fetch_resp.messages)
self.assertEquals(len(messages), 0)
def test_produce_consume(self):
produce = ProduceRequest(self.topic, 0, messages=[
create_message("Just a test message"),
create_message("Message with a key", "foo"),
])
for resp in self.client.send_produce_request([produce]):
self.assertEquals(resp.error, 0)
self.assertEquals(resp.offset, 0)
fetch = FetchRequest(self.topic, 0, 0, 1024)
fetch_resp = self.client.send_fetch_request([fetch])[0]
self.assertEquals(fetch_resp.error, 0)
messages = list(fetch_resp.messages)
self.assertEquals(len(messages), 2)
self.assertEquals(messages[0].offset, 0)
self.assertEquals(messages[0].message.value, "Just a test message")
self.assertEquals(messages[0].message.key, None)
self.assertEquals(messages[1].offset, 1)
self.assertEquals(messages[1].message.value, "Message with a key")
self.assertEquals(messages[1].message.key, "foo")
def test_produce_consume_many(self):
produce = ProduceRequest(self.topic, 0, messages=[
create_message("Test message %d" % i) for i in range(100)
])
for resp in self.client.send_produce_request([produce]):
self.assertEquals(resp.error, 0)
self.assertEquals(resp.offset, 0)
# 1024 is not enough for 100 messages...
fetch1 = FetchRequest(self.topic, 0, 0, 1024)
(fetch_resp1,) = self.client.send_fetch_request([fetch1])
self.assertEquals(fetch_resp1.error, 0)
self.assertEquals(fetch_resp1.highwaterMark, 100)
messages = list(fetch_resp1.messages)
self.assertTrue(len(messages) < 100)
# 10240 should be enough
fetch2 = FetchRequest(self.topic, 0, 0, 10240)
(fetch_resp2,) = self.client.send_fetch_request([fetch2])
self.assertEquals(fetch_resp2.error, 0)
self.assertEquals(fetch_resp2.highwaterMark, 100)
messages = list(fetch_resp2.messages)
self.assertEquals(len(messages), 100)
for i, message in enumerate(messages):
self.assertEquals(message.offset, i)
self.assertEquals(message.message.value, "Test message %d" % i)
self.assertEquals(message.message.key, None)
def test_produce_consume_two_partitions(self):
produce1 = ProduceRequest(self.topic, 0, messages=[
create_message("Partition 0 %d" % i) for i in range(10)
])
produce2 = ProduceRequest(self.topic, 1, messages=[
create_message("Partition 1 %d" % i) for i in range(10)
])
for resp in self.client.send_produce_request([produce1, produce2]):
self.assertEquals(resp.error, 0)
self.assertEquals(resp.offset, 0)
fetch1 = FetchRequest(self.topic, 0, 0, 1024)
fetch2 = FetchRequest(self.topic, 1, 0, 1024)
fetch_resp1, fetch_resp2 = self.client.send_fetch_request([fetch1, fetch2])
self.assertEquals(fetch_resp1.error, 0)
self.assertEquals(fetch_resp1.highwaterMark, 10)
messages = list(fetch_resp1.messages)
self.assertEquals(len(messages), 10)
for i, message in enumerate(messages):
self.assertEquals(message.offset, i)
self.assertEquals(message.message.value, "Partition 0 %d" % i)
self.assertEquals(message.message.key, None)
self.assertEquals(fetch_resp2.error, 0)
self.assertEquals(fetch_resp2.highwaterMark, 10)
messages = list(fetch_resp2.messages)
self.assertEquals(len(messages), 10)
for i, message in enumerate(messages):
self.assertEquals(message.offset, i)
self.assertEquals(message.message.value, "Partition 1 %d" % i)
self.assertEquals(message.message.key, None)
####################
# Offset Tests #
####################
@unittest.skip('commmit offset not supported in this version')
def test_commit_fetch_offsets(self):
req = OffsetCommitRequest(self.topic, 0, 42, "metadata")
(resp,) = self.client.send_offset_commit_request("group", [req])
self.assertEquals(resp.error, 0)
req = OffsetFetchRequest(self.topic, 0)
(resp,) = self.client.send_offset_fetch_request("group", [req])
self.assertEquals(resp.error, 0)
self.assertEquals(resp.offset, 42)
self.assertEquals(resp.metadata, "") # Metadata isn't stored for now
# Producer Tests
def test_simple_producer(self):
producer = SimpleProducer(self.client)
resp = producer.send_messages(self.topic, "one", "two")
# Will go to partition 0
self.assertEquals(len(resp), 1)
self.assertEquals(resp[0].error, 0)
self.assertEquals(resp[0].offset, 0) # offset of first msg
# Will go to partition 1
resp = producer.send_messages(self.topic, "three")
self.assertEquals(len(resp), 1)
self.assertEquals(resp[0].error, 0)
self.assertEquals(resp[0].offset, 0) # offset of first msg
fetch1 = FetchRequest(self.topic, 0, 0, 1024)
fetch2 = FetchRequest(self.topic, 1, 0, 1024)
fetch_resp1, fetch_resp2 = self.client.send_fetch_request([fetch1,
fetch2])
self.assertEquals(fetch_resp1.error, 0)
self.assertEquals(fetch_resp1.highwaterMark, 2)
messages = list(fetch_resp1.messages)
self.assertEquals(len(messages), 2)
self.assertEquals(messages[0].message.value, "one")
self.assertEquals(messages[1].message.value, "two")
self.assertEquals(fetch_resp2.error, 0)
self.assertEquals(fetch_resp2.highwaterMark, 1)
messages = list(fetch_resp2.messages)
self.assertEquals(len(messages), 1)
self.assertEquals(messages[0].message.value, "three")
# Will go to partition 0
resp = producer.send_messages(self.topic, "four", "five")
self.assertEquals(len(resp), 1)
self.assertEquals(resp[0].error, 0)
self.assertEquals(resp[0].offset, 2) # offset of first msg
producer.stop()
def test_round_robin_partitioner(self):
producer = KeyedProducer(self.client,
partitioner=RoundRobinPartitioner)
producer.send(self.topic, "key1", "one")
producer.send(self.topic, "key2", "two")
producer.send(self.topic, "key3", "three")
producer.send(self.topic, "key4", "four")
fetch1 = FetchRequest(self.topic, 0, 0, 1024)
fetch2 = FetchRequest(self.topic, 1, 0, 1024)
fetch_resp1, fetch_resp2 = self.client.send_fetch_request([fetch1,
fetch2])
self.assertEquals(fetch_resp1.error, 0)
self.assertEquals(fetch_resp1.highwaterMark, 2)
self.assertEquals(fetch_resp1.partition, 0)
messages = list(fetch_resp1.messages)
self.assertEquals(len(messages), 2)
self.assertEquals(messages[0].message.value, "one")
self.assertEquals(messages[1].message.value, "three")
self.assertEquals(fetch_resp2.error, 0)
self.assertEquals(fetch_resp2.highwaterMark, 2)
self.assertEquals(fetch_resp2.partition, 1)
messages = list(fetch_resp2.messages)
self.assertEquals(len(messages), 2)
self.assertEquals(messages[0].message.value, "two")
self.assertEquals(messages[1].message.value, "four")
producer.stop()
def test_hashed_partitioner(self):
    """HashedPartitioner should route each message to hash(key) % 2.

    With integer keys 1-4 and two partitions, even keys (2, 4) map to
    partition 0 and odd keys (1, 3) to partition 1.
    """
    producer = KeyedProducer(self.client,
                             partitioner=HashedPartitioner)
    producer.send(self.topic, 1, "one")
    producer.send(self.topic, 2, "two")
    producer.send(self.topic, 3, "three")
    producer.send(self.topic, 4, "four")
    fetch1 = FetchRequest(self.topic, 0, 0, 1024)
    fetch2 = FetchRequest(self.topic, 1, 0, 1024)
    fetch_resp1, fetch_resp2 = self.client.send_fetch_request([fetch1,
                                                               fetch2])
    self.assertEquals(fetch_resp1.error, 0)
    self.assertEquals(fetch_resp1.highwaterMark, 2)
    self.assertEquals(fetch_resp1.partition, 0)
    messages = list(fetch_resp1.messages)
    self.assertEquals(len(messages), 2)
    self.assertEquals(messages[0].message.value, "two")
    self.assertEquals(messages[1].message.value, "four")
    self.assertEquals(fetch_resp2.error, 0)
    self.assertEquals(fetch_resp2.highwaterMark, 2)
    self.assertEquals(fetch_resp2.partition, 1)
    messages = list(fetch_resp2.messages)
    self.assertEquals(len(messages), 2)
    self.assertEquals(messages[0].message.value, "one")
    self.assertEquals(messages[1].message.value, "three")
    producer.stop()
def test_acks_none(self):
    """With ACK_NOT_REQUIRED the producer gets no responses back, but the
    message must still be readable from the broker."""
    producer = SimpleProducer(self.client,
                              req_acks=SimpleProducer.ACK_NOT_REQUIRED)
    resp = producer.send_messages(self.topic, "one")
    # no acks requested -> empty response list
    self.assertEquals(len(resp), 0)
    fetch = FetchRequest(self.topic, 0, 0, 1024)
    fetch_resp = self.client.send_fetch_request([fetch])
    self.assertEquals(fetch_resp[0].error, 0)
    self.assertEquals(fetch_resp[0].highwaterMark, 1)
    self.assertEquals(fetch_resp[0].partition, 0)
    messages = list(fetch_resp[0].messages)
    self.assertEquals(len(messages), 1)
    self.assertEquals(messages[0].message.value, "one")
    producer.stop()
def test_acks_local_write(self):
    """With ACK_AFTER_LOCAL_WRITE the producer gets one ack per request and
    the message is readable from the broker."""
    producer = SimpleProducer(self.client,
                              req_acks=SimpleProducer.ACK_AFTER_LOCAL_WRITE)
    resp = producer.send_messages(self.topic, "one")
    self.assertEquals(len(resp), 1)
    fetch = FetchRequest(self.topic, 0, 0, 1024)
    fetch_resp = self.client.send_fetch_request([fetch])
    self.assertEquals(fetch_resp[0].error, 0)
    self.assertEquals(fetch_resp[0].highwaterMark, 1)
    self.assertEquals(fetch_resp[0].partition, 0)
    messages = list(fetch_resp[0].messages)
    self.assertEquals(len(messages), 1)
    self.assertEquals(messages[0].message.value, "one")
    producer.stop()
def test_acks_cluster_commit(self):
    """With ACK_AFTER_CLUSTER_COMMIT the ack arrives only after replication,
    and the message is readable from the broker."""
    producer = SimpleProducer(
        self.client,
        req_acks=SimpleProducer.ACK_AFTER_CLUSTER_COMMIT)
    resp = producer.send_messages(self.topic, "one")
    self.assertEquals(len(resp), 1)
    fetch = FetchRequest(self.topic, 0, 0, 1024)
    fetch_resp = self.client.send_fetch_request([fetch])
    self.assertEquals(fetch_resp[0].error, 0)
    self.assertEquals(fetch_resp[0].highwaterMark, 1)
    self.assertEquals(fetch_resp[0].partition, 0)
    messages = list(fetch_resp[0].messages)
    self.assertEquals(len(messages), 1)
    self.assertEquals(messages[0].message.value, "one")
    producer.stop()
def test_async_simple_producer(self):
    """Async producer returns immediately (no acks); the message shows up
    on the broker after a short delay."""
    # NOTE(review): `async` became a reserved keyword in Python 3.7; this
    # test targets Python 2 / old kafka-python (later renamed async_send).
    producer = SimpleProducer(self.client, async=True)
    resp = producer.send_messages(self.topic, "one")
    self.assertEquals(len(resp), 0)
    # Give it some time
    time.sleep(2)
    fetch = FetchRequest(self.topic, 0, 0, 1024)
    fetch_resp = self.client.send_fetch_request([fetch])
    self.assertEquals(fetch_resp[0].error, 0)
    self.assertEquals(fetch_resp[0].highwaterMark, 1)
    self.assertEquals(fetch_resp[0].partition, 0)
    messages = list(fetch_resp[0].messages)
    self.assertEquals(len(messages), 1)
    self.assertEquals(messages[0].message.value, "one")
    producer.stop()
def test_async_keyed_producer(self):
    """Async keyed producer returns immediately; the keyed message shows up
    on the broker after a short delay."""
    # NOTE(review): `async` is Py2-era kafka-python syntax (see above).
    producer = KeyedProducer(self.client, async=True)
    resp = producer.send(self.topic, "key1", "one")
    self.assertEquals(len(resp), 0)
    # Give it some time
    time.sleep(2)
    fetch = FetchRequest(self.topic, 0, 0, 1024)
    fetch_resp = self.client.send_fetch_request([fetch])
    self.assertEquals(fetch_resp[0].error, 0)
    self.assertEquals(fetch_resp[0].highwaterMark, 1)
    self.assertEquals(fetch_resp[0].partition, 0)
    messages = list(fetch_resp[0].messages)
    self.assertEquals(len(messages), 1)
    self.assertEquals(messages[0].message.value, "one")
    producer.stop()
def test_batched_simple_producer(self):
    """Batching producer flushes when 10 messages accumulate or every 20s.

    Phase 1: 5 messages -> below the count threshold, nothing on the broker.
    Phase 2: 5 more -> count threshold reached, all 10 flushed (5/partition).
    Phase 3: 7 more -> below threshold, flushed only by the 20s timer.
    """
    producer = SimpleProducer(self.client,
                              batch_send=True,
                              batch_send_every_n=10,
                              batch_send_every_t=20)
    # Send 5 messages and do a fetch
    msgs = ["message-%d" % i for i in range(0, 5)]
    resp = producer.send_messages(self.topic, *msgs)
    # Batch mode is async. No ack
    self.assertEquals(len(resp), 0)
    # Give it some time
    time.sleep(2)
    fetch1 = FetchRequest(self.topic, 0, 0, 1024)
    fetch2 = FetchRequest(self.topic, 1, 0, 1024)
    fetch_resp1, fetch_resp2 = self.client.send_fetch_request([fetch1,
                                                               fetch2])
    # nothing flushed yet: 5 < batch_send_every_n
    self.assertEquals(fetch_resp1.error, 0)
    messages = list(fetch_resp1.messages)
    self.assertEquals(len(messages), 0)
    self.assertEquals(fetch_resp2.error, 0)
    messages = list(fetch_resp2.messages)
    self.assertEquals(len(messages), 0)
    # Send 5 more messages, wait for 2 seconds and do a fetch
    msgs = ["message-%d" % i for i in range(5, 10)]
    resp = producer.send_messages(self.topic, *msgs)
    # Give it some time
    time.sleep(2)
    fetch1 = FetchRequest(self.topic, 0, 0, 1024)
    fetch2 = FetchRequest(self.topic, 1, 0, 1024)
    fetch_resp1, fetch_resp2 = self.client.send_fetch_request([fetch1,
                                                               fetch2])
    # count threshold hit: 10 messages flushed, split across partitions
    self.assertEquals(fetch_resp1.error, 0)
    messages = list(fetch_resp1.messages)
    self.assertEquals(len(messages), 5)
    self.assertEquals(fetch_resp2.error, 0)
    messages = list(fetch_resp2.messages)
    self.assertEquals(len(messages), 5)
    # Send 7 messages and wait for 20 seconds
    msgs = ["message-%d" % i for i in range(10, 15)]
    resp = producer.send_messages(self.topic, *msgs)
    msgs = ["message-%d" % i for i in range(15, 17)]
    resp = producer.send_messages(self.topic, *msgs)
    fetch1 = FetchRequest(self.topic, 0, 5, 1024)
    fetch2 = FetchRequest(self.topic, 1, 5, 1024)
    fetch_resp1, fetch_resp2 = self.client.send_fetch_request([fetch1,
                                                               fetch2])
    # 7 < 10, so nothing is flushed until the 20s timer fires
    self.assertEquals(fetch_resp1.error, 0)
    self.assertEquals(fetch_resp2.error, 0)
    messages = list(fetch_resp1.messages) + list(fetch_resp2.messages)
    self.assertEquals(len(messages), 0)
    # Give it some time
    time.sleep(22)
    fetch1 = FetchRequest(self.topic, 0, 5, 1024)
    fetch2 = FetchRequest(self.topic, 1, 5, 1024)
    fetch_resp1, fetch_resp2 = self.client.send_fetch_request([fetch1,
                                                               fetch2])
    self.assertEquals(fetch_resp1.error, 0)
    self.assertEquals(fetch_resp2.error, 0)
    messages = list(fetch_resp1.messages) + list(fetch_resp2.messages)
    self.assertEquals(len(messages), 7)
    producer.stop()
class TestConsumer(KafkaTestCase):
    """Integration tests for SimpleConsumer and MultiProcessConsumer against
    a live two-broker Kafka cluster (topics have partitions 0 and 1)."""

    @classmethod
    def setUpClass(cls):
        # One zookeeper plus two brokers; the client connects via broker 2.
        cls.zk = ZookeeperFixture.instance()
        cls.server1 = KafkaFixture.instance(0, cls.zk.host, cls.zk.port)
        cls.server2 = KafkaFixture.instance(1, cls.zk.host, cls.zk.port)
        cls.client = KafkaClient('%s:%d' % (cls.server2.host, cls.server2.port))

    @classmethod
    def tearDownClass(cls):  # noqa
        cls.client.close()
        cls.server1.close()
        cls.server2.close()
        cls.zk.close()

    def test_simple_consumer(self):
        """Consume 200 produced messages, then verify seek() from the end."""
        # Produce 100 messages to partition 0
        produce1 = ProduceRequest(self.topic, 0, messages=[
            create_message("Test message 0 %d" % i) for i in range(100)
        ])
        for resp in self.client.send_produce_request([produce1]):
            self.assertEquals(resp.error, 0)
            self.assertEquals(resp.offset, 0)
        # Produce 100 messages to partition 1
        produce2 = ProduceRequest(self.topic, 1, messages=[
            create_message("Test message 1 %d" % i) for i in range(100)
        ])
        for resp in self.client.send_produce_request([produce2]):
            self.assertEquals(resp.error, 0)
            self.assertEquals(resp.offset, 0)
        # Start a consumer (iter_timeout=0: iteration stops when caught up)
        consumer = SimpleConsumer(self.client, "group1",
                                  self.topic, auto_commit=False,
                                  iter_timeout=0)
        all_messages = []
        for message in consumer:
            all_messages.append(message)
        self.assertEquals(len(all_messages), 200)
        # Make sure there are no duplicates
        self.assertEquals(len(all_messages), len(set(all_messages)))
        # seek(-N, 2): N messages back from the end, spread over partitions
        consumer.seek(-10, 2)
        all_messages = []
        for message in consumer:
            all_messages.append(message)
        self.assertEquals(len(all_messages), 10)
        consumer.seek(-13, 2)
        all_messages = []
        for message in consumer:
            all_messages.append(message)
        self.assertEquals(len(all_messages), 13)
        consumer.stop()

    def test_simple_consumer_blocking(self):
        """get_messages(block=True) must honor its timeout and count args."""
        consumer = SimpleConsumer(self.client, "group1",
                                  self.topic,
                                  auto_commit=False, iter_timeout=0)
        # Blocking API: empty topic -> wait out the full timeout
        start = datetime.now()
        messages = consumer.get_messages(block=True, timeout=5)
        diff = (datetime.now() - start).total_seconds()
        self.assertGreaterEqual(diff, 5)
        self.assertEqual(len(messages), 0)
        # Send 10 messages
        produce = ProduceRequest(self.topic, 0, messages=[
            create_message("Test message 0 %d" % i) for i in range(10)
        ])
        for resp in self.client.send_produce_request([produce]):
            self.assertEquals(resp.error, 0)
            self.assertEquals(resp.offset, 0)
        # Fetch 5 messages
        messages = consumer.get_messages(count=5, block=True, timeout=5)
        self.assertEqual(len(messages), 5)
        # Fetch 10 messages: only 5 remain, so we wait out the timeout
        start = datetime.now()
        messages = consumer.get_messages(count=10, block=True, timeout=5)
        self.assertEqual(len(messages), 5)
        diff = (datetime.now() - start).total_seconds()
        self.assertGreaterEqual(diff, 5)
        consumer.stop()

    def test_simple_consumer_pending(self):
        """pending() reports unread counts overall and per partition."""
        # Produce 10 messages to partition 0 and 1
        produce1 = ProduceRequest(self.topic, 0, messages=[
            create_message("Test message 0 %d" % i) for i in range(10)
        ])
        for resp in self.client.send_produce_request([produce1]):
            self.assertEquals(resp.error, 0)
            self.assertEquals(resp.offset, 0)
        produce2 = ProduceRequest(self.topic, 1, messages=[
            create_message("Test message 1 %d" % i) for i in range(10)
        ])
        for resp in self.client.send_produce_request([produce2]):
            self.assertEquals(resp.error, 0)
            self.assertEquals(resp.offset, 0)
        consumer = SimpleConsumer(self.client, "group1", self.topic,
                                  auto_commit=False, iter_timeout=0)
        self.assertEquals(consumer.pending(), 20)
        self.assertEquals(consumer.pending(partitions=[0]), 10)
        self.assertEquals(consumer.pending(partitions=[1]), 10)
        consumer.stop()

    def test_multi_process_consumer(self):
        """MultiProcessConsumer: full consumption plus blocking semantics."""
        # Produce 100 messages to partition 0
        produce1 = ProduceRequest(self.topic, 0, messages=[
            create_message("Test message 0 %d" % i) for i in range(100)
        ])
        for resp in self.client.send_produce_request([produce1]):
            self.assertEquals(resp.error, 0)
            self.assertEquals(resp.offset, 0)
        # Produce 100 messages to partition 1
        produce2 = ProduceRequest(self.topic, 1, messages=[
            create_message("Test message 1 %d" % i) for i in range(100)
        ])
        for resp in self.client.send_produce_request([produce2]):
            self.assertEquals(resp.error, 0)
            self.assertEquals(resp.offset, 0)
        # Start a consumer
        consumer = MultiProcessConsumer(self.client, "grp1", self.topic, auto_commit=False)
        all_messages = []
        for message in consumer:
            all_messages.append(message)
        self.assertEquals(len(all_messages), 200)
        # Make sure there are no duplicates
        self.assertEquals(len(all_messages), len(set(all_messages)))
        # Blocking API (4.999: allow sub-ms scheduling slack across processes)
        start = datetime.now()
        messages = consumer.get_messages(block=True, timeout=5)
        diff = (datetime.now() - start).total_seconds()
        self.assertGreaterEqual(diff, 4.999)
        self.assertEqual(len(messages), 0)
        # Send 10 messages
        produce = ProduceRequest(self.topic, 0, messages=[
            create_message("Test message 0 %d" % i) for i in range(10)
        ])
        for resp in self.client.send_produce_request([produce]):
            self.assertEquals(resp.error, 0)
            self.assertEquals(resp.offset, 100)
        # Fetch 5 messages
        messages = consumer.get_messages(count=5, block=True, timeout=5)
        self.assertEqual(len(messages), 5)
        # Fetch 10 messages: only 5 remain, so the call waits out the timeout
        start = datetime.now()
        messages = consumer.get_messages(count=10, block=True, timeout=5)
        self.assertEqual(len(messages), 5)
        diff = (datetime.now() - start).total_seconds()
        self.assertGreaterEqual(diff, 5)
        consumer.stop()

    def test_multi_proc_pending(self):
        """pending() counts for the multi-process consumer."""
        # Produce 10 messages to partition 0 and 1
        produce1 = ProduceRequest(self.topic, 0, messages=[
            create_message("Test message 0 %d" % i) for i in range(10)
        ])
        for resp in self.client.send_produce_request([produce1]):
            self.assertEquals(resp.error, 0)
            self.assertEquals(resp.offset, 0)
        produce2 = ProduceRequest(self.topic, 1, messages=[
            create_message("Test message 1 %d" % i) for i in range(10)
        ])
        for resp in self.client.send_produce_request([produce2]):
            self.assertEquals(resp.error, 0)
            self.assertEquals(resp.offset, 0)
        consumer = MultiProcessConsumer(self.client, "group1", self.topic, auto_commit=False)
        self.assertEquals(consumer.pending(), 20)
        self.assertEquals(consumer.pending(partitions=[0]), 10)
        self.assertEquals(consumer.pending(partitions=[1]), 10)
        consumer.stop()

    def test_large_messages(self):
        """Consumers grow their fetch buffer for large messages; a message
        over MAX_FETCH_BUFFER_SIZE_BYTES raises unless max_buffer_size=None."""
        # Produce 10 "normal" size messages
        messages1 = [create_message(random_string(1024)) for i in range(10)]
        produce1 = ProduceRequest(self.topic, 0, messages1)
        for resp in self.client.send_produce_request([produce1]):
            self.assertEquals(resp.error, 0)
            self.assertEquals(resp.offset, 0)
        # Produce 10 messages that are large (bigger than default fetch size)
        messages2 = [create_message(random_string(5000)) for i in range(10)]
        produce2 = ProduceRequest(self.topic, 0, messages2)
        for resp in self.client.send_produce_request([produce2]):
            self.assertEquals(resp.error, 0)
            self.assertEquals(resp.offset, 10)
        # Consumer should still get all of them
        consumer = SimpleConsumer(self.client, "group1", self.topic,
                                  auto_commit=False, iter_timeout=0)
        all_messages = messages1 + messages2
        for i, message in enumerate(consumer):
            self.assertEquals(all_messages[i], message.message)
        self.assertEquals(i, 19)
        # Produce 1 message that is too large (bigger than max fetch size)
        big_message_size = MAX_FETCH_BUFFER_SIZE_BYTES + 10
        big_message = create_message(random_string(big_message_size))
        produce3 = ProduceRequest(self.topic, 0, [big_message])
        for resp in self.client.send_produce_request([produce3]):
            self.assertEquals(resp.error, 0)
            self.assertEquals(resp.offset, 20)
        self.assertRaises(ConsumerFetchSizeTooSmall, consumer.get_message, False, 0.1)
        # Create a consumer with no fetch size limit
        big_consumer = SimpleConsumer(self.client, "group1", self.topic,
                                      max_buffer_size=None, partitions=[0],
                                      auto_commit=False, iter_timeout=0)
        # Seek to the last message
        big_consumer.seek(-1, 2)
        # Consume giant message successfully
        message = big_consumer.get_message(block=False, timeout=10)
        self.assertIsNotNone(message)
        self.assertEquals(message.message.value, big_message.value)
class TestFailover(KafkaTestCase):
    """Broker failover tests: kill the partition leader mid-stream and verify
    producers recover, with at most one duplicate/extra message per round."""

    @classmethod
    def setUpClass(cls):  # noqa
        zk_chroot = random_string(10)
        replicas = 2
        partitions = 2
        # mini zookeeper, 2 kafka brokers
        cls.zk = ZookeeperFixture.instance()
        kk_args = [cls.zk.host, cls.zk.port, zk_chroot, replicas, partitions]
        cls.brokers = [KafkaFixture.instance(i, *kk_args) for i in range(replicas)]
        hosts = ['%s:%d' % (b.host, b.port) for b in cls.brokers]
        cls.client = KafkaClient(hosts)

    @classmethod
    def tearDownClass(cls):
        cls.client.close()
        for broker in cls.brokers:
            broker.close()
        cls.zk.close()

    def test_switch_leader(self):
        """Sync producer: killing the leader raises FailedPayloadsError once;
        after metadata reload, sends succeed against the new leader."""
        key, topic, partition = random_string(5), self.topic, 0
        producer = SimpleProducer(self.client)
        for i in range(1, 4):
            # XXX unfortunately, the conns dict needs to be warmed for this to work
            # XXX unfortunately, for warming to work, we need at least as many partitions as brokers
            self._send_random_messages(producer, self.topic, 10)
            # kill leader for partition 0
            broker = self._kill_leader(topic, partition)
            # expect failure, reload meta data
            with self.assertRaises(FailedPayloadsError):
                producer.send_messages(self.topic, 'part 1')
                producer.send_messages(self.topic, 'part 2')
            time.sleep(1)
            # send to new leader
            self._send_random_messages(producer, self.topic, 10)
            broker.open()
            time.sleep(3)
            # count number of messages (20-22 per round: retries may duplicate)
            count = self._count_messages('test_switch_leader group %s' % i, topic)
            self.assertIn(count, range(20 * i, 22 * i + 1))
        producer.stop()

    def test_switch_leader_async(self):
        """Async producer: sends during a leader switch must not raise; the
        messages are retried internally against the new leader."""
        key, topic, partition = random_string(5), self.topic, 0
        # NOTE(review): `async` is Py2-era kafka-python (later async_send).
        producer = SimpleProducer(self.client, async=True)
        for i in range(1, 4):
            self._send_random_messages(producer, self.topic, 10)
            # kill leader for partition 0
            broker = self._kill_leader(topic, partition)
            # expect no failure; metadata reload happens in the background
            producer.send_messages(self.topic, 'part 1')
            producer.send_messages(self.topic, 'part 2')
            time.sleep(1)
            # send to new leader
            self._send_random_messages(producer, self.topic, 10)
            broker.open()
            time.sleep(3)
            # count number of messages
            count = self._count_messages('test_switch_leader_async group %s' % i, topic)
            self.assertIn(count, range(20 * i, 22 * i + 1))
        producer.stop()

    def _send_random_messages(self, producer, topic, n):
        """Send n random 10-char messages; assert acks (if any) are error-free."""
        for j in range(n):
            resp = producer.send_messages(topic, random_string(10))
            if len(resp) > 0:
                self.assertEquals(resp[0].error, 0)
        time.sleep(1)  # give it some time

    def _kill_leader(self, topic, partition):
        """Shut down the broker currently leading (topic, partition)."""
        leader = self.client.topics_to_brokers[TopicAndPartition(topic, partition)]
        broker = self.brokers[leader.nodeId]
        broker.close()
        time.sleep(1)  # give it some time
        return broker

    def _count_messages(self, group, topic):
        """Drain the topic with a fresh client/consumer; return message count."""
        hosts = '%s:%d' % (self.brokers[0].host, self.brokers[0].port)
        client = KafkaClient(hosts)
        consumer = SimpleConsumer(client, group, topic, auto_commit=False, iter_timeout=0)
        all_messages = []
        for message in consumer:
            all_messages.append(message)
        consumer.stop()
        client.close()
        return len(all_messages)
if __name__ == "__main__":
    # Run the integration suite directly with verbose broker/client logging.
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
|
import FWCore.ParameterSet.Config as cms
# Minimal CMSSW job: generate 20 empty events, 4 per luminosity block,
# and write them to multi_lumi.root (used as a multi-lumi test input file).
process = cms.Process("WRITE")
process.source = cms.Source("EmptySource", numberEventsInLuminosityBlock = cms.untracked.uint32(4))
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(20))
process.out = cms.OutputModule("PoolOutputModule", fileName = cms.untracked.string("multi_lumi.root"))
process.o = cms.EndPath(process.out)
|
import os
import subprocess
import time
import boardom as bd
from .gitignore import IGNORE, IGNORE_FIRST
# TODO: (SOS) Guard git calls to ignore CTRL-C and SIGINTS until finished
# TODO: (SOS) Make it only master process
# TODO: Add logging
# TODO: Manage gitignore (e.g. .pth files, boardom files etc)
# TODO: Smart adding of files to index (?)
class _Gitter:
    """Thin wrapper around the ``git`` CLI used for boardom autocommits.

    An autocommit snapshots the current working tree onto a dedicated branch
    (``bd_autocommits``) and then restores the user's branch and working tree,
    so experiment runs are reproducible without disturbing normal development.
    """

    BD_BRANCH_NAME = 'bd_autocommits'

    def __init__(self):
        # Operate on the directory containing the running main script.
        self.directory = bd.process_path(bd.main_file_path())
        self.lockfile = os.path.join(self.directory, '.bd.gitlock')
        if not os.path.isdir(self.directory):
            raise RuntimeError(f'[boardom] Invalid directory {self.directory}')
        # Cached repository state; populated by _update_status().
        self.is_git_dir = None
        self.status = None
        self.status_codes = None
        self.status_files = None
        self.current_branch = None
        self.has_branches = None
        self.has_unstaged = None

    def __call__(self, cmd, should_raise=True, log=False):
        """Run ``git <cmd>`` inside self.directory.

        Returns (CompletedProcess, success) where success means exit code 0.
        If launching the process fails and should_raise is False, returns
        (None, False) instead of raising.
        """
        try:
            if log:
                bd.log(f'git {cmd}')
            proc = subprocess.run(
                f'cd {self.directory}; git {cmd}',
                shell=True,
                capture_output=True,
                text=True,
            )
        except subprocess.CalledProcessError:
            if should_raise:
                raise
            # Bug fix: the original fell through here and referenced an
            # unbound ``proc``; report the failure explicitly instead.
            return None, False
        success = proc.returncode == 0
        return proc, success

    def _check_is_git_dir(self):
        # "git rev-parse" succeeds only inside a git work tree.
        _, success = self('rev-parse', should_raise=False)
        return success

    def _set_base_git_dir(self):
        """Point self.directory at the repository root (the dir holding .git)."""
        proc, _ = self('rev-parse --git-dir')
        gitdir = proc.stdout.rstrip()
        if os.path.isabs(gitdir):
            self.directory = os.path.dirname(gitdir)
        elif gitdir != '.git':
            raise RuntimeError(
                f'[boardom] Could not get base git directory for {self.directory}.'
            )

    def check_current_branch_name(self):
        """Return (branch_name, success); success is False on a repository
        with no commits yet (HEAD cannot be resolved)."""
        proc, has_branches = self(
            'rev-parse --abbrev-ref --symbolic-full-name HEAD', should_raise=False
        )
        current_branch = proc.stdout.rstrip()
        return current_branch, has_branches

    def _get_current_commit_hash(self):
        """Return the full hash of the commit HEAD points at."""
        proc, _ = self('rev-parse HEAD')
        return proc.stdout.rstrip()

    def _update_status(self):
        """Refresh cached state: repo root, branch, commit, porcelain status."""
        self.is_git_dir = self._check_is_git_dir()
        if not self.is_git_dir:
            return
        self._set_base_git_dir()
        self.current_branch, self.has_branches = self.check_current_branch_name()
        self.current_commit = self._get_current_commit_hash()
        # Parse "git status --porcelain" output into [[code, path], ...].
        proc, _ = self('status --porcelain ')
        status = proc.stdout.split('\n')
        self.status = [[y for y in x.split(' ') if y] for x in status if x]

    def _print_info(self):
        """Debug helper: dump the cached repository state to stdout."""
        print(f'Is git dir: {self.is_git_dir}')
        print(f'Directory: {self.directory}')
        print(f'Current branch: {self.current_branch}')
        print(f'Has branches: {self.has_branches}')
        print(f'Status: {self.status}')

    def _message(self):
        """Commit message for autocommits, including a timestamp."""
        timestamp = time.strftime("Date: %Y/%m/%d, Time: %H:%M:%S")
        return f'Boardom autocommit\n{timestamp}'

    def _initialize_and_commit(self):
        """First-time setup: git init, commit everything on the autocommit
        branch, then leave the user on a fresh master branch.

        Returns (commit_hash, 'master', commit_hash).
        """
        proc, _ = self('init')
        self._update_status()
        self._add_gitignore()
        if not self.status:
            raise RuntimeError(
                f'[boardom] Git autocommit failed (no files in directory {self.directory})'
            )
        self(f'checkout -b {self.BD_BRANCH_NAME}', log=True)
        self('add .', log=True)
        self(f'commit -m "{self._message()}"', log=True)
        self("checkout -b master", log=True)
        print('[boardom] Initialized git dir and made commit.')
        commit_hash = self._get_current_commit_hash()
        return commit_hash, 'master', commit_hash

    def _do_parallel_commit(self):
        """Snapshot the working tree onto the autocommit branch, then restore
        the user's branch and uncommitted changes.

        Returns (previous_commit, previous_branch, new_autocommit_hash).
        """
        self._update_status()
        self._add_gitignore()
        prev_commit = self.current_commit
        prev_branch = self.current_branch
        self('add .', log=True)
        self('stash', log=True)
        proc, success = self(f'checkout -b {self.BD_BRANCH_NAME}', log=True)
        if not success:
            # The autocommit branch already exists: switch to it and bring it
            # up to date with the user's branch, preferring their side.
            self(f'checkout {self.BD_BRANCH_NAME}')
            # Bug fix: this command previously started with "git ", which
            # made the wrapper execute "git git merge ..." and fail silently.
            self(f'merge {prev_branch} --no-commit -s recursive -Xtheirs', log=True)
        # Lay the stashed (uncommitted) changes on top and commit them.
        self('checkout stash -- .', log=True)
        self('add .', log=True)
        self(f'commit -m "{self._message()}"', log=True)
        new_commit = self._get_current_commit_hash()
        # Restore the user's branch and their uncommitted working tree.
        self(f'checkout {prev_branch}', log=True)
        self('stash pop', log=True)
        print(f'[boardom] Made autocommit: {new_commit}')
        return prev_commit, prev_branch, new_commit

    def _add_gitignore(self):
        """Create a boardom .gitignore, or prepend the boardom block when an
        existing .gitignore does not already start with it."""
        ignore_file = os.path.join(self.directory, '.gitignore')
        if not os.path.exists(ignore_file):
            with open(ignore_file, 'w') as f:
                f.write(IGNORE)
            print('[boardom] Created .gitignore')
        else:
            with open(ignore_file) as f:
                first_line = f.readline()
            if first_line != IGNORE_FIRST:
                with open(ignore_file, 'r+') as f:
                    content = f.read()
                    f.seek(0, 0)
                    f.write(IGNORE + content)
                print('[boardom] Prepended to .gitignore.')
            else:
                print('[boardom] .gitignore already correct.')

    def _autocommit(self):
        """Dispatch to first-time init or a regular parallel autocommit."""
        self._update_status()
        # If git is not initialized, run git init
        if not self.is_git_dir:
            return self._initialize_and_commit()
        # Expected situation: the user is on a regular branch.
        if self.current_branch == 'HEAD':
            raise RuntimeError(
                '[boardom] Git is in a detached head state. Please checkout a branch.'
            )
        elif self.current_branch == self.BD_BRANCH_NAME:
            raise RuntimeError(
                '[boardom] Current branch is autocommit branch. '
                'If changes were made, please use git stash and '
                'git stash pop on your main branch to keep your changes.'
            )
        return self._do_parallel_commit()

    def autocommit(self):
        """Public entry point: take an autocommit snapshot.

        Serialized via a lockfile so concurrent processes cannot interleave
        git commands, and guarded against interrupts mid-operation.
        """
        self._update_status()
        with bd.filelock(self.lockfile):
            with bd.interrupt_guard():
                return self._autocommit()
def maybe_autocommit(autocommit, only_run_same_hash, session_path):
    """Optionally snapshot the repository before a run.

    Args:
        autocommit: when falsy, do nothing and return (None, None, None).
        only_run_same_hash: when truthy, persist the autocommit hash in
            ``<session_path>/.githash_guard`` on first run and report an
            error on later runs whose hash differs.
        session_path: directory holding the session's guard file.

    Returns:
        (current_hash, current_branch, autocommit_hash) from the autocommit,
        or (None, None, None) when autocommit is disabled.
    """
    if not autocommit:
        return None, None, None
    git = _Gitter()
    current_hash, current_branch, autohash = git.autocommit()
    if only_run_same_hash:
        git_hash_file = os.path.join(session_path, '.githash_guard')
        if not os.path.isfile(git_hash_file):
            # First run for this session: record the hash to compare against.
            bd.write_string_to_file(autohash, git_hash_file)
        else:
            with open(git_hash_file, 'r') as f:
                old_git_hash = f.read()
            if old_git_hash != autohash:
                # Bug fix: this message was missing the f-prefix, so the
                # placeholders were printed literally instead of the hashes.
                bd.error(
                    f'Current git hash ({autohash}) does not match the one '
                    f'used to generate the save directory ({old_git_hash}).'
                )
    return current_hash, current_branch, autohash
|
import os
import sys
sys.path.append('../../')
from dependencies import *
from settings import *
from reproducibility import *
from models.TGS_salt.Unet34_scSE_hyper import Unet_scSE_hyper as Net
import pickle
# Ensemble two sets of saved fold-averaged test predictions (pickled arrays
# of per-pixel salt probabilities) by a simple mean.
prediction0=pickle.load(open("../../../liao_checkpoints/OHEM_5fold.p","rb"))
prediction1=pickle.load(open("5folds.p","rb"))
predictions=np.array([prediction0,prediction1])
predictions=np.mean(predictions,axis=0)
print(predictions.shape)
print(prediction0.mean())
print(prediction1.mean())
# Collect test/train image ids (file names without directory or extension).
test_path = os.path.join(DATA, 'test')
test_file_list = glob.glob(os.path.join(test_path, 'images', '*.png'))
test_file_list = [f.split('/')[-1].split('.')[0] for f in test_file_list]
test_file_list[:3], test_path
train_path = os.path.join(DATA, 'train')
train_file_list = glob.glob(os.path.join(train_path, 'images', '*.png'))
train_file_list = [f.split('/')[-1].split('.')[0] for f in train_file_list]
train_file_list[:3], train_path
# Source images are 101x101 but the network ran on 128x128 (multiple of 32);
# recompute the symmetric padding that was applied so it can be cropped off.
height, width = 101, 101
if height % 32 == 0:
    y_min_pad = 0
    y_max_pad = 0
else:
    y_pad = 32 - height % 32
    y_min_pad = int(y_pad / 2)
    y_max_pad = y_pad - y_min_pad
if width % 32 == 0:
    x_min_pad = 0
    x_max_pad = 0
else:
    x_pad = 32 - width % 32
    x_min_pad = int(x_pad / 2)
    x_max_pad = x_pad - x_min_pad
# Crop predictions back to the original 101x101 frame, then binarize.
# NOTE(review): threshold 0.45 looks empirically tuned — confirm before reuse.
predictions = predictions[:, y_min_pad:128 - y_max_pad, x_min_pad:128 - x_max_pad]
threshold = 0.45
binary_prediction = (predictions > threshold).astype(int)
def rle_encoding(x):
    """Run-length encode a binary mask for a Kaggle-style submission.

    Pixels are scanned in column-major order (x.T flattened) and positions
    are 1-based. Returns a flat list [start1, len1, start2, len2, ...].
    """
    flat = x.T.flatten()
    ones = np.where(flat == 1)[0]
    runs = []
    previous = -2
    for pixel in ones:
        if pixel > previous + 1:
            # a gap ends the current run; open a new one (1-based, length 0)
            runs.extend((pixel + 1, 0))
        runs[-1] += 1
        previous = pixel
    return runs
# Run-length encode every predicted mask and write the Kaggle submission CSV
# (columns: id, rle_mask), gzip-compressed.
all_masks = []
for p_mask in list(binary_prediction):
    p_mask = rle_encoding(p_mask)
    all_masks.append(' '.join(map(str, p_mask)))
# NOTE(review): assumes binary_prediction rows are in the same order as
# test_file_list — verify, since the ids and masks are zipped positionally.
submit = pd.DataFrame([test_file_list, all_masks]).T
submit.columns = ['id', 'rle_mask']
submit.to_csv(RESULT+'/submission_10folds.csv.gz', compression = 'gzip', index = False)
|
# Generated by Django 2.1 on 2019-05-11 13:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional text-highlight anchoring fields (first/last character
    index and the highlighted text) to Comment and its history table."""

    dependencies = [
        ('comments', '0006_historicalcomment'),
    ]

    operations = [
        migrations.AddField(
            model_name='comment',
            name='first_index',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='comment',
            name='highlighted_text',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='comment',
            name='last_index',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        # Mirror the same fields on the django-simple-history model.
        migrations.AddField(
            model_name='historicalcomment',
            name='first_index',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='historicalcomment',
            name='highlighted_text',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='historicalcomment',
            name='last_index',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
    ]
|
"""
This module defines functions for extracting/defining version information for
packages. Functions for retrieving version/branch info from SVN, as well as
those for formatting version strings and finding packages with predefined
version info are defined in this module.
"""
# author: Rick Ratzel
# created: 8/9/05
import sys
import os
from os import path
import re
import glob
def get_all_component_versions() :
    """
    returns a list of tuples containing (<module name>, <version string>,
    <branch name or None if trunk>) for each package containing an appropriate
    <module>_version.py file in the package dir. The modules checked are those
    present in sys.modules
    """
    retList = []
    #
    # check each module which is in sys.modules that has a <mod>_version.py file
    #
    for modName in get_all_versioned_modules() :
        #
        # import the module to get the version info
        #
        mod = __import__( "%s.%s_version" % (modName, modName),
                          globals(), locals(),
                          ["%s_version" % modName] )
        #
        # extract the version string...check two names
        # ...if no version was defined, create one using other info if present
        # in the version file
        #
        if( hasattr( mod, "%s_version" % modName ) ) :
            verString = getattr( mod, "%s_version" % modName )
        elif( hasattr( mod, "version" ) ) :
            verString = getattr( mod, "version" )
        else :
            # fall back to assembling a string from the individual fields
            if( hasattr( mod, "major" ) and hasattr( mod, "minor" ) and
                hasattr( mod, "micro" ) and hasattr( mod, "release_level" ) and
                hasattr( mod, "revision" ) ) :
                verString = create_version_string(
                    getattr( mod, "major" ), getattr( mod, "minor" ),
                    getattr( mod, "micro" ), getattr( mod, "release_level"),
                    getattr( mod, "revision" ), mod.__file__ )
            else :
                verString = None
        #
        # extract the branch name if defined, else try to find it...set to None
        # if not a branch
        #
        if( hasattr( mod, "branch" ) ) :
            branch = getattr( mod, "branch" )
        else :
            branch = get_svn_branch( path.dirname( mod.__file__ ) )
        retList.append( (modName, verString, branch) )
    #
    # finally, include the Python version info
    #
    retList.append( (path.basename( sys.executable ), sys.version, None) )
    return retList
def create_version_string( major, minor, micro, release_level, revision,
                           version_file=None ) :
    """ Return a string representing the current version, based on various
    attributes of the intended release.

    The result has the form ``<major>.<minor>.<micro>[_<release_level>]``
    with ``_<revision>`` appended when a revision is known.
    """
    if release_level:
        rl = "_" + release_level
    else:
        rl = ""
    verString = "%d.%d.%d%s" % (major, minor, micro, rl)
    #
    # if the revision had not been supplied by a build, try to find it now
    # ...version_file is used simply as a way to get a directory in the package
    # which may have an .svn/entries file for extracting version info
    #
    if( (revision is None) and not( version_file is None ) ) :
        revision = get_svn_revision( path.dirname( version_file ) )
    #
    # Bug fix: previously an unknown revision produced a version string
    # ending in the literal "_None"; omit the suffix entirely instead.
    #
    if( revision is not None ) :
        verString += "_%s" % revision
    return verString
def get_svn_revision( dir_path ) :
    """
    return the SVN revision number for the specified dir. This is used when a
    revision has not been supplied by a build.

    Three strategies are tried in order, returning None if all fail:
    1. parse .svn/entries directly (SVN < 1.4 stored the revision there)
    2. the ``svnversion`` command
    3. ``svn info``
    NOTE(review): strategy 1 returns an int while 2 and 3 return a str; the
    only known caller just formats the value, so the mix appears intentional.
    """
    revision = None
    # For SVN prior to v1.4, the revision number could be pulled from the
    # entries file using a regular expression.
    entries = path.join( dir_path, ".svn", "entries" )
    if path.isfile( entries ) :
        fh = open( entries )
        match = re.search( r'revision="(?P<revision>\d+)"', fh.read() )
        fh.close()
        if( match ) :
            revision = int( match.group( "revision" ) )
    # For latter versions, we prefer to rely on the svnversion command.
    if revision is None:
        cmd = 'cd %s && svnversion' % dir_path
        result = os.popen(cmd).read()
        match = re.search( r'\s*(\S+)', result)
        if match:
            revision = match.group(1)
    # If that doesn't work, try the svn info command.
    if revision is None:
        cmd = 'cd %s && svn info' % dir_path
        result = os.popen(cmd).read()
        match = re.search( r'Revision: (\d+)', result)
        if match:
            revision = match.group(1)
    return revision
def get_svn_branch( dir_path ) :
    """
    return the SVN branch name for the specified dir, or None when the
    working copy is not on a branch (e.g. trunk) or has no SVN metadata.
    This is used when a branch name has not been supplied by a build.
    """
    branch = None
    entries_file = path.join( dir_path, ".svn", "entries" )
    if path.isfile( entries_file ) :
        with open( entries_file ) as handle :
            contents = handle.read()
        #
        # get the branch name from the url
        #
        url_match = re.search( r'url="(?P<url>.+)"', contents )
        if url_match :
            repo_url = url_match.group( "url" )
            if "branches" in repo_url :
                # the path component right after "branches" is the branch
                after = repo_url.split( "branches" )[-1]
                branch = after.split( "/" )[1]
    return branch
def get_all_versioned_modules() :
    """
    returns a list of importable module names currently in sys.modules which
    have the necessary version files used by the functions in this module for
    getting version info
    """
    retList = []
    #
    # NOTE: THIS ONLY CHECKS MODULES ALREADY IMPORTED
    #
    # Iterate over a snapshot: sys.modules can gain entries while we inspect
    # it (e.g. imports in another thread), which breaks direct iteration.
    #
    for modName in list( sys.modules ) :
        mod = sys.modules.get( modName )
        #
        # if module has a __path__ attr, then it is a package, and packages are
        # currently the only things that have the required version files
        #
        if( hasattr( mod, "__path__" ) ) :
            # namespace packages (and some builtins) have no usable __file__;
            # previously this raised AttributeError
            modFile = getattr( mod, "__file__", None )
            if( modFile is None ) :
                continue
            # renamed from "dir" to avoid shadowing the builtin
            pkgDir = path.dirname( modFile )
            #
            # finally, check if the dir making up the package has a version file
            #
            if( (path.exists( path.join( pkgDir, "%s_version.py" % modName ) )) or
                (glob.glob( path.join( pkgDir, "%s_version.py[co]" % modName ))) ) :
                retList.append( modName )
    return retList
|
# @oncall
"""This is a module."""
def f():
    """Do nothing; placeholder function that returns None."""
    return None
|
"""Tests NotificationData."""
import os
from pathlib import Path
import email
from circuit_maintenance_parser.data import NotificationData
# Directory containing this test module; used to locate fixture files below.
dir_path = os.path.dirname(os.path.realpath(__file__))
def test_init_from_raw():
    """init_from_raw wraps a (type, content) pair in a single data part."""
    notification = NotificationData.init_from_raw("my_type", b"my_content")
    assert isinstance(notification, NotificationData)
    parts = notification.data_parts
    assert len(parts) == 1
    only_part = parts[0]
    assert only_part.type == "my_type"
    assert only_part.content == b"my_content"
def test_init_from_raw_with_issue():
    """Test the init_data_raw function with issue."""
    assert NotificationData.init_from_raw({}, {}) is None
def test_init_from_email_bytes():
    """Test the email data load."""
    sample = Path(dir_path, "data", "email", "test_sample_message.eml")
    with open(sample, "rb") as email_file:
        raw = email_file.read()
    data = NotificationData.init_from_email_bytes(raw)
    assert isinstance(data, NotificationData)
    assert len(data.data_parts) == 5
def test_init_from_email_with_issue():
    """Test the init_data_email function with issue."""
    assert NotificationData.init_from_email_bytes("") is None
def test_init_from_emailmessage():
    """Test the emailmessage data load."""
    sample = Path(dir_path, "data", "email", "test_sample_message.eml")
    with open(sample, "rb") as email_file:
        message = email.message_from_string(email_file.read().decode("utf-8"))
    data = NotificationData.init_from_emailmessage(message)
    assert isinstance(data, NotificationData)
    assert len(data.data_parts) == 5
def test_init_from_emailmessage_with_issue():
    """Test the init_data_emailmessage function with issue."""
    assert NotificationData.init_from_emailmessage("") is None
|
import logging
import re
from types import TracebackType
from typing import (
Any,
Callable,
Dict,
Generic,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
from urllib.parse import urlencode, urlunparse
from selenium.webdriver import Chrome
from selenium.webdriver.remote.webdriver import WebDriver
from idom.server.base import AbstractRenderServer
from idom.server.prefab import hotswap_server
from idom.server.utils import find_available_port, find_builtin_server_type
# Public names exported by this testing-utilities module.
__all__ = [
    "find_available_port",
    "create_simple_selenium_web_driver",
    "ServerMountPoint",
]
# Render-server base type with both generic parameters erased.
AnyRenderServer = AbstractRenderServer[Any, Any]
def create_simple_selenium_web_driver(
    driver_type: Type[WebDriver] = Chrome,
    driver_options: Optional[Any] = None,
    implicit_wait_timeout: float = 3.0,
    page_load_timeout: float = 3.0,
    window_size: Tuple[int, int] = (1080, 800),
) -> WebDriver:
    """Construct a WebDriver with window size and timeouts already applied."""
    width, height = window_size
    web_driver = driver_type(options=driver_options)
    web_driver.set_window_size(width, height)
    web_driver.set_page_load_timeout(page_load_timeout)
    web_driver.implicitly_wait(implicit_wait_timeout)
    return web_driver
_Self = TypeVar("_Self", bound="ServerMountPoint[Any, Any]")
_Mount = TypeVar("_Mount")
_Server = TypeVar("_Server", bound=AnyRenderServer)
class ServerMountPoint(Generic[_Mount, _Server]):
    """A context manager for imperatively mounting views to a render server when testing

    While the context is entered, a root-logger handler captures every log
    record so tests can assert on (or fail because of) logged exceptions.
    """

    mount: _Mount
    server: _Server
    _log_handler: "_LogRecordCaptor"

    def __init__(
        self,
        server_type: Type[_Server] = find_builtin_server_type("PerClientStateServer"),
        host: str = "127.0.0.1",
        port: Optional[int] = None,
        server_config: Optional[Any] = None,
        run_kwargs: Optional[Dict[str, Any]] = None,
        mount_and_server_constructor: "Callable[..., Tuple[_Mount, _Server]]" = hotswap_server,  # type: ignore
        app: Optional[Any] = None,
        **other_options: Any,
    ) -> None:
        self.host = host
        self.port = port or find_available_port(host)
        # Defer construction to __enter__ so a mount point can be declared
        # without immediately starting a server.
        self._mount_and_server_constructor: "Callable[[], Tuple[_Mount, _Server]]" = (
            lambda: mount_and_server_constructor(
                server_type,
                self.host,
                self.port,
                server_config,
                run_kwargs,
                app,
                **other_options,
            )
        )

    @property
    def log_records(self) -> List[logging.LogRecord]:
        """A list of captured log records"""
        return self._log_handler.records

    def url(self, path: str = "", query: Optional[Any] = None) -> str:
        """Return a URL string pointing to the host and point of the server

        Args:
            path: the path to a resource on the server
            query: a dictionary or list of query parameters
        """
        return urlunparse(
            [
                "http",
                f"{self.host}:{self.port}",
                path,
                "",
                urlencode(query or ()),
                "",
            ]
        )

    def list_logged_exceptions(
        self,
        pattern: str = "",
        types: Union[Type[Any], Tuple[Type[Any], ...]] = Exception,
        log_level: int = logging.ERROR,
        del_log_records: bool = True,
    ) -> List[BaseException]:
        """Return a list of logged exception matching the given criteria

        Args:
            pattern: a regex the exception message must match ("" matches all)
            types: exception type (or tuple of types) to include
            log_level: the minimum level of log records to check
            del_log_records: whether to delete the log records for yielded exceptions
        """
        found: List[BaseException] = []
        compiled_pattern = re.compile(pattern)
        # Iterate over a snapshot: when del_log_records is true the loop
        # deletes from self.log_records, and enumerating the live list while
        # deleting would skip the record that shifts into the freed slot.
        for index, record in enumerate(list(self.log_records)):
            if record.levelno >= log_level and record.exc_info is not None:
                error = record.exc_info[1]
                if (
                    error is not None
                    and isinstance(error, types)
                    and compiled_pattern.search(str(error))
                ):
                    if del_log_records:
                        # "index" refers to the snapshot; subtract the number
                        # of records already removed to get the position in
                        # the live list.
                        del self.log_records[index - len(found)]
                    found.append(error)
        return found

    def __enter__(self: _Self) -> _Self:
        self._log_handler = _LogRecordCaptor()
        logging.getLogger().addHandler(self._log_handler)
        self.mount, self.server = self._mount_and_server_constructor()
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        self.server.stop()
        logging.getLogger().removeHandler(self._log_handler)
        del self.mount, self.server
        # Surface any exception that was only logged (e.g. in a server
        # thread) so the test fails loudly instead of passing silently.
        logged_errors = self.list_logged_exceptions(del_log_records=False)
        if logged_errors:  # pragma: no cover
            raise logged_errors[0]
        return None
class _LogRecordCaptor(logging.NullHandler):
    """A no-op logging handler that remembers every record it receives."""

    def __init__(self) -> None:
        super().__init__()
        self.records: List[logging.LogRecord] = []

    def handle(self, record: logging.LogRecord) -> None:
        # Capture the record instead of emitting it anywhere.
        self.records.append(record)
|
from django.shortcuts import render
from django.shortcuts import HttpResponse
def index(request):
    """Return a plain-text "hi" response for any request."""
    response = HttpResponse("hi")
    return response
# Create your views here.
|
import multi_cavity_base
import states
from util import isiter, sort_eigenstates
import numpy as np
import qutip
import copy
class CavityArray(multi_cavity_base.CavityArray):
    """Array of optical cavities coupled to emitters.

    Builds the system Hamiltonian in the basis enumerated by
    ``states.States``. Decay rates (kappa, gamma) enter as imaginary parts of
    the diagonal, so the Hamiltonian is non-Hermitian.
    """
    def __init__(self, num_cavities, num_photons, model_params, periodic=False):
        """Create new CavityArray object
        Args:
            num_cavities: number of cavities in the array
            num_photons: number of photons
            model_params: dict containing
                'emitters_per_cavity': List or float, number of emitters in each cavity
                'kappa': List or float, cavity decay rate
                'hopping': List or float, cavity-cavity hopping rate
                'gamma': List or float, emitter decay rate
                'g': List or float, cavity emitter coupling constants
                'cavity_freqs': List or float, cavity frequency
                'emitter_freqs': List or float, emitter frequencies
            periodic: bool, True for periodic boundary conditions
        """
        super().__init__(num_cavities, num_photons, model_params, periodic)
        # Basis enumeration for the configured excitation numbers.
        self.states = states.States(self.num_cavities, self.model_params['emitters_per_cavity'], self.num_photons)
    def hamiltonian(self):
        """Build and return the system Hamiltonian as a qutip.Qobj.

        Off-diagonal terms: cavity-emitter coupling (g) and nearest-neighbour
        photon hopping. Diagonal terms: complex frequencies whose imaginary
        parts (-0.5j * kappa / gamma) encode decay.
        """
        H = np.zeros((len(self.states), len(self.states)), dtype='complex')
        for col, state in enumerate(self.states):
            for loc in set(state):
                #cavity-emitter interaction terms
                if loc[1] >= 0: #excited emitter
                    #a.dag_i * sigma_ij
                    newstate, n = states.create((loc[0], -1), *states.destroy(loc, state))
                    if newstate in self.states:
                        row = self.states.index(newstate)
                        H[row][col] += self.cavities[loc[0]].g[loc[1]]
                #hopping terms
                if loc[1] == -1: #photon in cavity
                    # a.dag_(i+1) * a_i
                    newstate, n = states.create((loc[0]+1, -1), *states.destroy(loc, state))
                    # wrap the cavity index around for periodic boundary conditions
                    if self.periodic and self.num_cavities > 2: newstate = [(quanta[0]%self.num_cavities, quanta[1]) for quanta in newstate]
                    if newstate in self.states:
                        row = self.states.index(newstate)
                        H[row][col] -= self.hopping[loc[0]] * n
        # NOTE(review): this adds the plain (not conjugate) transpose, which is
        # only Hermitian-correct when g and hopping are real — confirm.
        H += H.T #add the transpose terms
        #add in a.dag a and sigma.dag sigma terms on the diagonal
        for col, state in enumerate(self.states):
            for loc in set(state):
                newstate, n = states.number(loc, state)
                if loc[1] >= 0: #emitter
                    w = self.cavities[loc[0]].emitter_freqs[loc[1]] - 0.5j * self.cavities[loc[0]].gamma[loc[1]]
                else:
                    w = self.cavities[loc[0]].cavity_freq - 0.5j * self.cavities[loc[0]].kappa
                H[col][col] += w * n
        return qutip.Qobj(H)
    def eigenstates(self):
        """Wrapper function for numpy.linalg.eig
        Returns the eigenvalues and eigenvectors of the Hamiltonian sorted by energy level
        """
        # Computed lazily and cached; _eigenstates is presumably initialised
        # to None in the base class — verify.
        if self._eigenstates is None:
            self._eigenstates = np.linalg.eig(self.hamiltonian())
            self._eigenstates = sort_eigenstates(*self._eigenstates)
        return self._eigenstates
|
#-*-coding:utf-8-*-
from .PyTorch_AlexNet import *
from .PyTorch_ResNet import * |
# Welcome to the third lesson in the Yesselman Group's Python series
# Topics covered: Lists
########################################################################################################################
# Part I: List basics
# As you have seen so far, variables are a great way to store information. That being said, it can be very cumbersome to
# write out a large number of variables. Imagine you have to store test scores for every student in Chem 109 (probably
# about 1300). Writing out that many variables would take a very long time (see below):
# score1 = XX
# score2 = XX
# ...
# score1300 = XX
# There has to be a better way, right? Enter Lists. In Python, a list is a "container" that holds other data inside it.
# Lists are extremely useful in python because they allow easy storage and access of data. Below are two examples of how to
# make lists. What do you think the code on lines 19 and 20 will print? Uncomment them and see.
my_list = [1,2,3,4,5]
my_list2 = list([6,7,8,9,10])
# print(my_list)
# print(my_list2)
# The basic syntax is [list_name] = [ elements ]. Note the elements HAVE to be surrounded with square brackets. Because
# lists have no value themselves, it is often important to know their size. In python the function that finds a list's
# size is len(). It has syntax very similar to type(). What do you think the following statements will print? Uncomment them
# and see:
#
# print(len(my_list))
# print(len(my_list2))
########################################################################################################################
# Part II: Adding elements to a list
# Now that you have an idea of what lists are, it's time to talk about the four main ways to create them:
# Method 1: Literal declarations
# Literal declarations were shown above. Like strings, booleans, ints, etc, you can always declare a list directly with
# specific elements as above. On a side note, the contents of a list can be very diverse. They can be different types or
# even other lists! See below for some examples of list declarations:
multitype_list = ["first", 2, 3, 4, 10.059, True]
two_dimensional_matrix = [[1,2,3,4], [5,6,7,8]]
# you can get pretty creative with these declarations so please explore them a bit!
# Method 2: Appending
# Appending is a way to add another element at the end of a list. Below is an example of appending a value to a list
my_list3 = [0,1,2]
# print(len(my_list3))
my_list3.append(3)
# print(len(my_list3))
# What do you think will be printed? Uncomment and see. It should be noted that when you append to a list, you always
# increase the size of the list by 1.
# Method 3: Concatenating with the "+=" operator
# The "+" and "+=" operators work very similarly with lists as they do with strings. Namely, the "+" operator adds together
# two lists and the "+=" adds the right hand side list to the left hand side one. Below are two examples:
#print(my_list + my_list2)
my_list4 = [9,9,9,9,9]
my_list4 += [-1,-1,-1,-1]
#print(my_list4)
# Method 4: Repeating an element
# As with strings, lists can be repeated by using the "*" and "*=" operators. Namely, these operators allow you to repeat
# the entirety of the list an integer number of times. See the below examples:
small_list = [1]
# print(small_list*100)
small_list *= 50
# print(small_list)
# Clearly, there are many ways to add elements to lists and it is good to understand how they all work. In practice, you
# will actually use all of them frequently, so please get to using them!
########################################################################################################################
# Part III: Removing elements from a list
# Using lists is about controlling the data they hold, so it is also useful to know how to remove data elements from them.
# There are three main ways to delete elements from a list:
# Method 1: Deleting all elements with .clear()/[]
# The simplest case of removal is completely deleting the list's elements. There are two ways to do this in python, using
# the .clear() method and setting the list equal to empty brackets "[]". Given what you know about list sizes and the
# len() function, what do you expect the following code to produce:
need_to_empty = [1]*10000
# print(len(need_to_empty))
need_to_empty.clear()
# print(len(need_to_empty))
need_to_empty = [1]*10000
# print(len(need_to_empty))
need_to_empty = []
# print(len(need_to_empty))
# Method 2: Removing a specific element with the .pop() method
# If you need to remove a specific element from a list, the .pop() method can be used. It takes an element index as an
# argument, removes it and shifts every element behind it over, decreasing the size of the list by 1. In programming,
# counting starts at 0, that is, the first element in a list has index 0 and the nth has index n-1 (more on this in part
# IV). Either way, below are some examples:
kill_first_entry = [1,2,3,4,5]
kill_first_entry.pop(0)
# print(kill_first_entry)
kill_second_entry = [1,2,3,4,5]
kill_second_entry.pop(1)
# print(kill_second_entry)
# Method 3: Removing an instance with a specific value .remove() method
# If you want to remove a value from a list by identity, use the remove .remove() method. It goes through the list and
# removes the first element that is equal to the specified removal value. This method is especially useful if you have a list
# of strings and want to remove specific strings. Below is an example:
many_letters = ["A","B","C","A","B","C","A","B","C","A","B","C"]
# print(len(many_letters))
many_letters.remove("A")
many_letters.remove("B")
# print(len(many_letters))
########################################################################################################################
# Part IV: Accessing elements in a list
# Now that you know how to add and remove data from a list, it is time to cover element access in python lists. As mentioned
# before, the counting of elements in lists starts at 0. For an example, see below:
index_list = [1,2,3,4,5]
# element index 0,1,2,3,4
# This important for element access as the format is list[index], which makes use of the bracket operator "[]". Below is
# an example of list element access:
# print(index_list[0])
# Because the size of a list often changes, python lists also support negative indexing. That is, to get the last element
# in a list, instead of putting in len(list) - 1, you can simply write "-1". This is especially useful if you are going to be
# changing the list's size:
# print(index_list[-1])
index_list.append(10)
# print(index_list[-1])
# Lastly, its important to know that you can "slice" lists by using a colon in the bracket operator. The result is a smaller
# list made up of the selected elements from the bigger list. The general syntax is [begin:end] although if you leave
# either blank, the begin is assumed to be 0 and the end is assumed to be len(list). Below are some examples of how this
# works:
slice_list = [1,2,3,4,5]
# printing second element on
# print(slice_list[1:])
# printing first element to one less than the last
# print(slice_list[:-1])
# printing second and third elements
# print(slice_list[1:3])
########################################################################################################################
# Part V: The "in" statement and similarities to strings
# It is often very useful to check if an element is in a list, which is done with the "in" statement. The statement
# has the syntax ELEMENT in LIST and returns a boolean. From this, it is very useful for if statements:
small_list = [0]
if 1 in small_list:
pass
else:
small_list.append(1)
# Lastly, a little connection between lists and strings can be drawn. It turns out that in python, strings are essentially
# lists of one string elements. Due to this, the [] operator also works for strings. Strings will be revisited in the
# future, but it is very useful to know how to use the [] operator. What do you think the following will print?
str1 = "strings are just like lists!"
# print(str1[0])
# print(str1[:-20])
########################################################################################################################
# Homework
# (T/F) python lists are indexed from 0
# (T/F) in python, you can put lists inside of lists
# You have a list of all the students at UNL, but want to remove anyone with your name. Given the list "unl_students"
# below, how would you do this?
unl_students = [] # assume this is a real list
# Create a list with the sequence [1,2,3] repeated 100 times. Print the initial list size, remove the first 2 and the last
# element in the list. What is the list's size now?
# Print out the type of the first element in the below list:
hw_list = ["A", "B", "C"]
# Bug busters. What is wrong with the following code? How would you fix it?
long_list = [1]*1000
last_index = len(long_list) - 1
long_list.pop(-1)
# print(long_list[last_index])
# Challenge problem: Write code that creates a mathematical matrix of dimension 10x10 with every element equal to 10.
|
# pragma pylint: disable=missing-docstring,C0103
import datetime
from pathlib import Path
from unittest.mock import MagicMock
from freqtrade.data.converter import parse_ticker_dataframe
from freqtrade.data.history import pair_data_filename
from freqtrade.misc import (datesarray_to_datetimearray, file_dump_json,
file_load_json, format_ms_time, plural, shorten_date)
def test_shorten_date() -> None:
    """shorten_date abbreviates the long-form time units."""
    long_form = '1 day, 2 hours, 3 minutes, 4 seconds ago'
    assert shorten_date(long_form) == '1 d, 2 h, 3 min, 4 sec ago'
def test_datesarray_to_datetimearray(ticker_history_list):
    """Converted dates keep their datetime components intact."""
    frame = parse_ticker_dataframe(ticker_history_list, "5m", pair="UNITTEST/BTC",
                                   fill_missing=True)
    converted = datesarray_to_datetimearray(frame['date'])
    first = converted[0]
    assert isinstance(first, datetime.datetime)
    assert (first.year, first.month, first.day) == (2017, 11, 26)
    assert (first.hour, first.minute) == (8, 50)
    assert len(converted) == 2
def test_file_dump_json(mocker) -> None:
    """file_dump_json opens the target file and serialises via rapidjson."""
    plain_open = mocker.patch('freqtrade.misc.open', MagicMock())
    dump_mock = mocker.patch('rapidjson.dump', MagicMock())
    file_dump_json(Path('somefile'), [1, 2, 3])
    assert plain_open.call_count == 1
    assert dump_mock.call_count == 1
    # With is_zip=True the gzip variant of open must be used instead.
    gzip_open = mocker.patch('freqtrade.misc.gzip.open', MagicMock())
    dump_mock = mocker.patch('rapidjson.dump', MagicMock())
    file_dump_json(Path('somefile'), [1, 2, 3], True)
    assert gzip_open.call_count == 1
    assert dump_mock.call_count == 1
def test_file_load_json(mocker, testdatadir) -> None:
    """file_load_json falls back between .json and .json.gz variants."""
    # no 7m .json data exists at all
    assert not file_load_json(pair_data_filename(testdatadir, 'UNITTEST/BTC', '7m'))
    # 1m .json exists (but no .gz exists)
    assert file_load_json(pair_data_filename(testdatadir, 'UNITTEST/BTC', '1m'))
    # 8m .json is empty and would fail if loaded; the .json.gz copy of 1m is used
    assert file_load_json(pair_data_filename(testdatadir, 'UNITTEST/BTC', '8m'))
def test_format_ms_time() -> None:
    """format_ms_time renders epoch milliseconds in local time."""
    # Date 2018-04-10 18:02:01 UTC
    formatted = format_ms_time(1523383321000)
    assert type(formatted) is str
    expected = datetime.datetime(2018, 4, 10, 18, 2, 1, tzinfo=datetime.timezone.utc)
    assert formatted == expected.astimezone(None).strftime('%Y-%m-%dT%H:%M:%S')
    # Date 2017-12-13 08:02:01 UTC
    expected = datetime.datetime(2017, 12, 13, 8, 2, 1, tzinfo=datetime.timezone.utc)
    assert format_ms_time(1513152121000) == expected.astimezone(None).strftime('%Y-%m-%dT%H:%M:%S')
def test_plural() -> None:
    """plural picks the singular form only for +/-1 (int or float)."""
    plural_counts = (0, 0.0, 2, 2.0, -2, -2.0, 0.5, 1.5, -0.5, -1.5)
    singular_counts = (1, 1.0, -1, -1.0)
    for num in plural_counts:
        assert plural(num, "page") == "pages"
        assert plural(num, "ox", "oxen") == "oxen"
    for num in singular_counts:
        assert plural(num, "page") == "page"
        assert plural(num, "ox", "oxen") == "ox"
|
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import dash_table
import plotly.express as px
import pandas as pd
import pkg_resources
from callbacks import register_callbacks
# Responsive viewport tag so the app renders sensibly on mobile devices.
meta_viewport = {"name": "viewport", "content": "width=device-width, initial-scale=1, shrink-to-fit=no"}
app = dash.Dash(__name__,
                meta_tags=[meta_viewport],
                external_stylesheets = [dbc.themes.BOOTSTRAP]
                )
# Uncomment to enable Dash dev tools (debug UI + dev bundles):
# app.enable_dev_tools(
#     dev_tools_ui=True,
#     dev_tools_serve_dev_bundles=True,
# )
# Static top navigation bar showing the application title.
topbar = dbc.Navbar(
    [
        dbc.Row(
            [
                dbc.Col(dbc.NavbarBrand("Dynamical X-ray Diffraction")),
            ],
            align="center",
            no_gutters=True,
            className = "mr-auto pr-2"
        ),
    ], className = "mb-2", sticky = "top")
# CIF files bundled with the pydxd package; the dropdown lists them all.
CIF_LIST = pkg_resources.resource_listdir('pydxd', "data/cif")
# NOTE(review): the chained "select_cif = select =" also binds a module-level
# alias "select" that nothing visible here uses — confirm it is unused.
select_cif = select = dbc.Select(
    id="cif_select",
    options=[{"label": x, "value": x} for x in CIF_LIST],
    value = 'Cu_mp-30_conventional_standard.cif',
    persistence = True, persistence_type = 'session',
)
sites_table = dash_table.DataTable(
columns = [
{
'id': 'Z',
'name': 'Z',
},
{
'id': 'name',
'name': 'Atom',
},
{
'id': 'label',
'name': 'Label',
},
{
'id': 'zcoord',
'name': 'z',
'type': 'numeric',
'format': {'specifier': '.4f'},
},
{
'id': 'Hdotr',
'name': 'H·r',
'type': 'numeric',
'format': {'specifier': '.4f'},
},
{
'id': 'cohpos',
'name': 'CP',
'type': 'numeric',
'format': {'specifier': '.4f'},
},
],
data = [],
editable = False,
page_action = 'none',
style_header= {'fontWeight': 'bold'},
style_cell = {'font-family': 'sans-serif', 'textAlign': 'center', 'whiteSpace': 'normal', 'font-size': '11pt'},
id='sites_table',
)
app.layout = dbc.Container(
[
topbar,
html.Div([
html.Hr(),
dbc.Container(
dbc.Row([
# Inputs
dbc.Col([
dbc.Form([
dbc.FormGroup(
[
dbc.Label("CIF", width=3),
dbc.Col(select_cif, width=9),
],
row=True,
),
]),
dbc.FormGroup(
[
dbc.Label("(hkl)", width=3),
dbc.Col([
dbc.InputGroup([
dbc.Input(
type="numeric",
id={'type': 'hkl', 'index': 'h'},
placeholder="h",
value=1,
persistence = True, persistence_type = 'session'
),
dbc.Input(
type="numeric",
id={'type': 'hkl', 'index': 'k'},
placeholder="k",
value=1,
persistence = True, persistence_type = 'session'
),
dbc.Input(
type="numeric",
id={'type': 'hkl', 'index': 'l'},
placeholder="l",
value=1,
persistence = True, persistence_type = 'session'
),
], size="sm")
], className = "my-auto", width=9),
],
row=True,
),
dbc.FormGroup(
[
dbc.Label("Scan mode", width=3),
dbc.Col([
dbc.RadioItems(
id="scanmode_select",
options=[
{"label": "Angle", "value": 'angle'},
{"label": "Energy", "value": 'energy'},
],
value = 'angle',
persistence = True, persistence_type = 'session'
),
], className = "my-auto", width=3),
dbc.Label("Energy (eV)", id = "scanmode_fixed_label", width=3),
dbc.Col([
dbc.Input(
type="numeric",
id="scanmode_fixed_value",
value = 15000,
bs_size = "sm",
persistence = True, persistence_type = 'session',
)
], className = "my-auto", width=3),
],
row=True,
),
dbc.FormGroup(
[
dbc.Label("Δx", width=3),
dbc.Col([
dbc.Input(
type="numeric",
id="xrange_delta",
value = 5,
bs_size = "sm",
persistence = True, persistence_type = 'session',
),
], className = "my-auto", width=3),
dbc.Label("# points", width=3),
dbc.Col([
dbc.Input(
type="numeric",
id="xrange_npts",
value = 501,
bs_size = "sm",
persistence = True, persistence_type = 'session',
)
], className = "my-auto", width=3),
],
row=True,
),
], width = 6),
# Sites table
dbc.Col([
html.Div(sites_table, style = {'overflowY': 'auto', 'height': '210px'}),
]),
]),
),
html.Hr(),
dbc.Alert(
id = 'results_alert',
is_open = False,
duration = 2000,
),
dbc.Container([
dbc.Row(
[
# Controls and calculated parameters
dbc.Col([
dbc.Button('Calculate', id = 'results_calculate_button'),
], width = 4),
dbc.Col(
[
dbc.Row([
dbc.Col(dcc.Markdown('d<sub>hkl</sub>', dangerously_allow_html = True), width = 2),
dbc.Col(id = {'type': 'results_parameter', 'index': 'd_hkl'}, width=10),
], className = 'rowresults'),
dbc.Row([
dbc.Col(dcc.Markdown('Θ<sub>B</sub>', dangerously_allow_html = True), width = 2),
dbc.Col(id = {'type': 'results_parameter', 'index': 'Bragg_angle'}, width=10),
], className = 'rowresults'),
dbc.Row([
dbc.Col(dcc.Markdown('E<sub>B</sub>', dangerously_allow_html = True), width = 2),
dbc.Col(id = {'type': 'results_parameter', 'index': 'Bragg_energy'}, width=10),
], className = 'rowresults'),
dbc.Row([
dbc.Col(dcc.Markdown('λ<sub>B</sub>', dangerously_allow_html = True), width = 2),
dbc.Col(id = {'type': 'results_parameter', 'index': 'Bragg_wavelength'}, width=10),
], className = 'rowresults'),
],
width = 4,
),
dbc.Col(
[
dbc.Row([
dbc.Col(dcc.Markdown('F<sub>0</sub>', dangerously_allow_html = True), width = 2),
dbc.Col(id = {'type': 'results_parameter', 'index': 'F_0'}, width=10),
], className = 'rowresults'),
dbc.Row([
dbc.Col(dcc.Markdown('F<sub>h</sub>', dangerously_allow_html = True), width = 2),
dbc.Col(id = {'type': 'results_parameter', 'index': 'F_H'}, width=10),
], className = 'rowresults'),
dbc.Row([
dbc.Col(dcc.Markdown('F<sub>hb</sub>', dangerously_allow_html = True), width = 2),
dbc.Col(id = {'type': 'results_parameter', 'index': 'F_Hb'}, width=10),
], className = 'rowresults'),
dbc.Row([
dbc.Col(dcc.Markdown('φ<sub>h</sub>', dangerously_allow_html = True), width = 2),
dbc.Col(id = {'type': 'results_parameter', 'index': 'Angle_F_H'}, width=10),
], className = 'rowresults'),
],
width = 4,
),
],
className = "pb-2",
),
dbc.Row([
# Refl, Phase
dbc.Col([
html.Div(dcc.Graph(id = 'results_figure_refl'))
], width = 5),
# EF
dbc.Col([
dbc.Row([
dbc.Col([
], width = 3),
dbc.Col([
html.Div(dcc.Graph(id = 'results_figure_elf_x'))
], width = 9),
]),
dbc.Row([
dbc.Col([
html.Div(dcc.Graph(id = 'results_figure_elf_y'))
], width = 3),
dbc.Col([
html.Div(dcc.Graph(id = 'results_figure_elf'))
], width = 9),
])
], width = 7),
]),
]),
dbc.Row([
# RC figure
dbc.Col([
]),
# RC controls
dbc.Col([
]),
]),
])
]
)
# Attach all interactive behaviour defined in callbacks.py to the app.
register_callbacks(app)
if __name__ == '__main__':
    # Development server only; use a production WSGI server for deployment.
    app.run_server(debug=True, port=5666)
from functools import reduce
from typing import Set
from unittest import TestCase, main
from program_graphs.adg.parser.java.parser import parse # type: ignore
from slicing.block.block import gen_block_slices
from slicing.block.block import get_entry_candidates, get_node_lines, mk_declared_variables_table
from slicing.block.filters import at_least_one_block_stmt, last_or_next_statement_is_control
from slicing.block.state import State
from slicing.block.utils import get_occupied_line_range, count_ncss, find_blank_and_full_comment_lines
class TestBlockSlice(TestCase):
def test_entry_candidates(self) -> None:
code = """
stmt();
if (){
stmt();
stmt();
}
for (;;){
stmt();
stmt();
}
stmt()
"""
adg = parse(code)
state = State(adg, None, None, {}, {}, {}) # type: ignore
entry_lines: Set[int] = reduce(
lambda a, b: a | b,
[get_node_lines(adg, n) for n in get_entry_candidates(state)],
set([])
)
self.assertSetEqual(entry_lines, {1, 2, 3, 6, 7, 10})
def test_block_slice_control_flow(self) -> None:
code = """
if (){
stmt();
stmt();
}
stmt();
"""
adg = parse(code)
bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
self.assertIn([1, 2, 3, 4], bss)
self.assertIn([1, 2, 3, 4, 5], bss)
self.assertIn([2], bss)
self.assertIn([2, 3], bss)
self.assertIn([5], bss)
self.assertNotIn([3], bss)
self.assertNotIn([1, 2], bss)
self.assertNotIn([1, 2, 3], bss)
self.assertNotIn([2, 3, 4], bss)
self.assertNotIn([2, 3, 5], bss)
def test_block_slice_control_flow_if_else(self) -> None:
code = """
if (x == 0) {
stmt();
} else {
stmt();
}
"""
adg = parse(code)
bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
self.assertIn([1, 2, 3, 4, 5], bss)
self.assertIn([2], bss)
self.assertIn([4], bss)
self.assertNotIn([1, 2], bss)
self.assertNotIn([3], bss)
self.assertNotIn([4, 5], bss)
def test_block_slice_data_dependency(self) -> None:
code = """
int a = 1;
int b = a;
int c = a + b;
int d = c;
"""
adg = parse(code)
bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
self.assertIn([1, 2, 3, 4], bss)
self.assertIn([2, 3, 4], bss)
self.assertIn([3, 4], bss)
self.assertIn([1], bss)
self.assertIn([2], bss)
self.assertIn([3], bss)
self.assertIn([4], bss)
self.assertIn([1, 2, 3], bss)
self.assertNotIn([1, 2], bss)
def test_block_slice_class_field_not_need_to_return(self) -> None:
code = """
int a = 1;
int d = 1;
a = a + 1;
b = a + d;
int c = a + b;
"""
adg = parse(code)
bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
self.assertIn([2, 3, 4], bss)
def test_mk_declared_variables_table(self) -> None:
code = """
T o;
int a = 1;
a = a + 1;
o[0] = 1;
int c = a + o[0];
"""
adg = parse(code)
t = mk_declared_variables_table(adg.to_ddg(), code)
self.assertIn(set(['a']), t.values())
self.assertIn(set(['o']), t.values())
self.assertIn(set(['c']), t.values())
def test_block_slice_readonly_object_not_need_to_return(self) -> None:
code = """
T o;
int a = 1;
int b = 1;
a = a + b;
o[0] = 1;
int c = a + o[0];
"""
adg = parse(code)
bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
self.assertIn([3, 4, 5], bss)
def test_block_slice_no_repetitions(self) -> None:
code = """
int a = 1;
for (;;){
stmt();
}
"""
adg = parse(code)
bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
self.assertIn([1], bss)
self.assertIn([3], bss)
self.assertIn([2, 3, 4], bss)
self.assertIn([1, 2, 3, 4], bss)
self.assertEqual(len(bss), 4)
def test_block_slice_bad_format(self) -> None:
code = """
int a = 1;
for (;;)
{
stmt();
}
"""
adg = parse(code)
bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
self.assertIn([1], bss)
self.assertIn([4], bss)
self.assertIn([2, 3, 4, 5], bss)
self.assertIn([1, 2, 3, 4, 5], bss)
self.assertEqual(len(bss), 4)
def test_block_slice_var_declaration(self) -> None:
code = """
int a = 1;
int b = 1;
"""
adg = parse(code)
bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
self.assertIn([1], bss)
self.assertIn([2], bss)
self.assertIn([1, 2], bss)
def test_block_slice_if_else_if_first_line_shared(self) -> None:
code = """
if () {
} else if (){
}
"""
adg = parse(code)
bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
self.assertIn([1, 2, 3], bss)
self.assertNotIn([2, 3], bss)
def test_block_slice_if_else_if_new_line(self) -> None:
code = """
if () {
} else
if (){
}
"""
adg = parse(code)
bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
self.assertIn([1, 2, 3, 4], bss)
self.assertIn([3, 4], bss)
def test_block_slice_at_least_one_block_stmt(self) -> None:
code = """
stmt;
if (){ }
stmt;
"""
adg = parse(code)
bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code, [at_least_one_block_stmt])]
self.assertIn([1, 2], bss)
self.assertIn([2, 3], bss)
self.assertIn([1, 2, 3], bss)
self.assertIn([2], bss)
self.assertNotIn([1], bss)
self.assertNotIn([3], bss)
def test_block_slice_last_ast_statement_filter(self) -> None:
code = """
stmt;
stmt;
if (){ }
stmt;
stmt;
"""
adg = parse(code)
bss = [sorted(bs.block_slice_lines()) for bs in
gen_block_slices(adg, code, [last_or_next_statement_is_control])]
self.assertIn([1, 2], bss)
self.assertIn([1, 2, 3], bss)
self.assertIn([1, 2, 3, 4, 5], bss)
self.assertIn([3], bss)
self.assertIn([4, 5], bss)
self.assertIn([3, 4, 5], bss)
self.assertNotIn([1], bss)
self.assertNotIn([3, 4], bss)
def test_block_slice_try_catch(self) -> None:
    """A try/catch slices as a whole or by inner bodies, never from mid-construct."""
    code = """
        try {
            stmt;
        } catch (Exception e) {
            stmt;
        }
        """
    adg = parse(code)
    bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
    self.assertIn([1, 2, 3, 4, 5], bss)
    self.assertIn([2], bss)
    self.assertIn([4], bss)
    # A slice may not start inside the try body and swallow the catch.
    self.assertNotIn([2, 3, 4, 5], bss)
def test_block_slice_try_finally(self) -> None:
    """No slice may start inside a try body and extend into/past the finally."""
    code = """
        try(T a = new T();) {
            stmt();
        }
        finally {
            stmt();
        }
        stmt();
        """
    adg = parse(code)
    bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
    self.assertNotIn([5, 6, 7], bss)
    self.assertNotIn([2, 3, 4, 5, 6], bss)
    self.assertNotIn([2, 3, 4, 5, 6, 7], bss)
def test_block_slice_try_with_resources(self) -> None:
    """try-with-resources slices as a unit; partial slices into it are invalid."""
    code = """
        try (T a = new T();) {
            stmt;
        }
        if () {
        }
        """
    adg = parse(code)
    bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
    self.assertIn([2], bss)
    self.assertIn([4, 5], bss)
    self.assertIn([1, 2, 3], bss)
    self.assertIn([1, 2, 3, 4, 5], bss)
    # Starting inside the try body and continuing past it is not allowed.
    self.assertNotIn([2, 3, 4, 5], bss)
def test_block_slice_for(self) -> None:
    """A for loop slices as a whole or by its body, never without its closing brace."""
    code = """
        for (int i = 0; i < 10; i++){
            stmt();
        }
        """
    adg = parse(code)
    bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
    self.assertIn([1, 2, 3], bss)
    self.assertIn([2], bss)
    # The loop header + body without the closing brace is incomplete.
    self.assertNotIn([1, 2], bss)
def test_block_slice_for_last_line_shared(self) -> None:
    """A statement sharing the loop's closing-brace line forces a wider slice."""
    code = """
        for (int i = 0; i < 10; i++){
            stmt();
        } int x =
        y + 4;
        """
    adg = parse(code)
    bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
    # Line 3 is shared between the loop's `}` and the declaration, so the
    # loop alone ([1, 2, 3]) and the declaration alone ([3, 4]) are invalid.
    self.assertNotIn([1, 2, 3], bss)
    self.assertIn([1, 2, 3, 4], bss)
    self.assertIn([2], bss)
    self.assertNotIn([3, 4], bss)
def test_block_slice_for_update_clause_not_included(self) -> None:
    """The statement in a for's update clause does not create extra slices."""
    code = """
        stmt;
        for (expr;expr;stmt){
            stmt();
        }
        """
    adg = parse(code)
    bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
    self.assertIn([1], bss)
    self.assertIn([3], bss)
    self.assertIn([2, 3, 4], bss)
    self.assertIn([1, 2, 3, 4], bss)
    # Neither header+body without brace nor body+brace is a valid slice.
    self.assertNotIn([2, 3], bss)
    self.assertNotIn([3, 4], bss)
def test_block_slice_complete_return(self) -> None:
    """An if/else where every branch returns may be sliced without a suffix."""
    code = """
        stmt;
        if (){
            return;
        } else {
            return;
        }
        """
    adg = parse(code)
    bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
    self.assertIn([1], bss)
    self.assertIn([3], bss)
    self.assertIn([5], bss)
    self.assertIn([1, 2, 3, 4, 5, 6], bss)
    self.assertIn([2, 3, 4, 5, 6], bss)
def test_block_slice_complete_return_and_throw(self) -> None:
    """throw counts like return: both branches exit, so the if is sliceable."""
    code = """
        stmt;
        if (){
            return;
        } else {
            throw;
        }
        """
    adg = parse(code)
    bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
    self.assertIn([1], bss)
    self.assertIn([3], bss)
    self.assertIn([1, 2, 3, 4, 5, 6], bss)
    self.assertIn([2, 3, 4, 5, 6], bss)
def test_block_slice_non_complete_return(self) -> None:
    """When only one branch returns, slices ending at the if must also take
    the statements that follow it."""
    code = """
        stmt;
        if (){
            return;
        } else {
            stmt;
        }
        stmt;
        stmt;
        """
    adg = parse(code)
    bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
    self.assertIn([1], bss)
    self.assertIn([3], bss)
    self.assertIn([5], bss)
    self.assertIn([7], bss)
    self.assertIn([7, 8], bss)
    # Stopping right after the partially-returning if is invalid …
    self.assertNotIn([1, 2, 3, 4, 5, 6], bss)
    self.assertNotIn([1, 2, 3, 4, 5, 6, 7], bss)
    # … but including everything up to the end is fine.
    self.assertIn([2, 3, 4, 5, 6, 7, 8], bss)
    self.assertIn([1, 2, 3, 4, 5, 6, 7, 8], bss)
def test_block_slice_bug(self) -> None:
    """Regression test: declarations whose variables are used later must not
    be extracted without their uses."""
    code = """
        A a = new A();
        B b = new B();
        a == 1;
        b == 1;
        """
    adg = parse(code)
    bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
    self.assertNotIn([1, 2], bss)
def test_block_slice_non_complete_return_but_there_are_no_statement_after(self) -> None:
    """A partially-returning if is extractable when nothing follows it."""
    # NOTE(review): 'syncronized' [sic] is the fixture text as written;
    # presumably the parser treats it as an ordinary statement — confirm
    # before "fixing" the spelling.
    code = """
        if () {
            syncronized (a) {
                if (){
                    return;
                } else {
                    stmt;
                }
            }
        }
        """
    adg = parse(code)
    bss = [sorted(bs.block_slice_lines()) for bs in gen_block_slices(adg, code)]
    self.assertIn([3, 4, 5, 6, 7], bss, msg='we can extract this block because there are no more statement after')
def test_count_ncss(self) -> None:
    """count_ncss over rows 0..9 with three comment rows {2, 3, 4} yields 7."""
    comment_lines = count_ncss(((0, 0), (9, 0)), {2, 3, 4})
    self.assertEqual(comment_lines, 7)
def test_find_comment_lines(self) -> None:
    """find_blank_and_full_comment_lines reports blank lines and lines that
    are entirely comments (a trailing `// comment` after code does not count)."""
    code = """
        // comment
        if (){
            // comment
            stmt(); // comment
            a = b +
            // comment
            + c;
        } else {
            stmt();
        }"""
    adg = parse(code)
    comment_lines = find_blank_and_full_comment_lines(adg.to_ast(), adg.get_entry_node())
    self.assertSetEqual(comment_lines, {1, 2, 4, 7, 9})
def test_get_occupied_line_range_whole_snippet(self) -> None:
    """The entry node occupies from the if header to its final closing brace."""
    code = """
        if (){
            stmt();
        } else {
            stmt();
        }"""
    adg = parse(code)
    r = get_occupied_line_range(adg.to_ast(), adg.get_entry_node())
    # ((start_line, start_col), (end_line, end_col)); code is indented 8 columns.
    self.assertEqual(r, ((1, 8), (5, 9)))
def test_get_occupied_line_range_whole_snippet_long(self) -> None:
    """The range covers every line of a construct spread over many lines."""
    code = """
        if
        ()
        {
            stmt();
        }
        else
        {
            stmt();
        }"""
    adg = parse(code)
    r = get_occupied_line_range(adg.to_ast(), adg.get_entry_node())
    self.assertEqual(r, ((1, 8), (9, 9)))
def test_get_occupied_line_range_whole_snippet_with_comments(self) -> None:
    """Leading and embedded comments are included in the occupied range."""
    code = """
        // comment
        if (){
            /*
            Multiline comment
            */
            stmt();
        } else {
            stmt();
        }"""
    adg = parse(code)
    r = get_occupied_line_range(adg.to_ast(), adg.get_entry_node())
    self.assertEqual(r, ((1, 8), (9, 9)))
def test_ncss_whole_snippet_with_multiline_statement(self) -> None:
    """A statement wrapped over two lines still yields the full construct range."""
    code = """
        if (){
            a = b +
            c;
        }"""
    adg = parse(code)
    r = get_occupied_line_range(adg.to_ast(), adg.get_entry_node())
    self.assertEqual(r, ((1, 8), (4, 9)))
def test_get_occupied_line_range_multiline_single_statement(self) -> None:
    """A single call spanning five lines occupies all of them."""
    code = """
        fun(
            a,
            b,
            c
        );"""
    adg = parse(code)
    r = get_occupied_line_range(adg.to_ast(), adg.get_entry_node())
    self.assertEqual(r, ((1, 8), (5, 10)))
def test_get_occupied_line_range_with_one_line_multiple_stmts(self) -> None:
    """Two statements sharing one line do not widen the construct's range."""
    code = """
        if (){
            int a = 4; stmt();
        }"""
    adg = parse(code)
    r = get_occupied_line_range(adg.to_ast(), adg.get_entry_node())
    self.assertEqual(r, ((1, 8), (3, 9)))
def test_get_occupied_line_range(self) -> None:
    """The range of a specific if node spans its header to its closing brace."""
    code = """
        if (){
            int a = 4;
        }
        """
    adg = parse(code)
    # Destructure to assert there is exactly one node named 'if'.
    [if_node] = [node for node, name in adg.nodes(data='name') if name == 'if']
    r = get_occupied_line_range(adg.to_ast(), if_node)
    self.assertEqual(r, ((1, 8), (3, 9)))
def test_get_occupied_line_range_no_brackets(self) -> None:
    """A braceless if ends where its single-statement body ends (column 22)."""
    code = """
        if ()
            int a = 4;
        """
    adg = parse(code)
    [if_node] = [node for node, name in adg.nodes(data='name') if name == 'if']
    r = get_occupied_line_range(adg.to_ast(), if_node)
    self.assertEqual(r, ((1, 8), (2, 22)))
def test_get_occupied_line_range_bad_format(self) -> None:
    """When the closing brace shares a line with another statement, the
    range ends at the brace, not at the end of the line."""
    code = """
        if ()
        {
            int a = 4;
            stmt();} stmt();
        """
    adg = parse(code)
    [if_node] = [node for node, name in adg.nodes(data='name') if name == 'if']
    r = get_occupied_line_range(adg.to_ast(), if_node)
    self.assertEqual(r, ((1, 8), (4, 20)))
# Allow running this test module directly (delegates to unittest's main()).
if __name__ == '__main__':
    main()
|
from __future__ import absolute_import
import os
import pytest
import shutil
import subprocess
import importlib
import inspect
import compas
def get_names_in_module(module_name):
    """Return the sorted public, non-module names exposed by *module_name*.

    Prefers the module's ``__all__`` when present, otherwise falls back to
    ``dir()``.  Names starting with an underscore, the ``__future__``
    re-exports, and submodule objects are excluded.
    """
    exceptions = ['absolute_import', 'division', 'print_function']
    module = importlib.import_module(module_name)
    candidates = getattr(module, '__all__', None)
    if candidates is None:
        candidates = dir(module)
    public = (
        name for name in candidates
        if not name.startswith('_')
        and name not in exceptions
        and not inspect.ismodule(getattr(module, name))
    )
    return sorted(public)
@pytest.fixture
def compas_fab_api():
    """Map each core compas_fab module to its sorted public API names.

    Returns None under IronPython, where the introspection cannot run.
    """
    if compas.IPY:
        return
    module_names = [
        'compas_fab.backends',
        'compas_fab.robots',
        'compas_fab.sensors',
        'compas_fab.utilities',
    ]
    return {name: get_names_in_module(name) for name in module_names}
@pytest.fixture
def compas_fab_stubs():
    """Generate Sphinx autosummary stubs for the docs API pages and collect
    the documented item names, grouped per ``compas_fab.<module>``.

    Returns None under IronPython.  Works in a throwaway ``temp/stubs``
    directory that is removed both before and after generation.
    """
    if compas.IPY:
        return
    env = compas._os.prepare_environment()
    # Resolve repo-relative paths: this file lives two levels below the root.
    HERE = os.path.dirname(__file__)
    HOME = os.path.abspath(os.path.join(HERE, '../..'))
    TEMP = os.path.abspath(os.path.join(HOME, 'temp/stubs'))
    DOCS = os.path.abspath(os.path.join(HOME, 'docs'))
    API = os.path.abspath(os.path.join(DOCS, 'api'))
    shutil.rmtree(TEMP, ignore_errors=True)
    _, _, filenames = next(os.walk(API))
    for name in filenames:
        # Skip the package index page and anything not belonging to compas_fab.
        if name == 'compas_fab.rst' or not name.startswith('compas_fab.'):
            continue
        stub = os.path.abspath(os.path.join(API, name))
        subprocess.call('sphinx-autogen -o {} {}'.format(TEMP, stub), shell=True, env=env)
    _, _, filenames = next(os.walk(TEMP))
    shutil.rmtree(TEMP, ignore_errors=True)
    stubs = {}
    for name in filenames:
        # Expected stub filename shape: compas_fab.<module>.<Item>.rst (4 parts).
        parts = name.split('.')
        if len(parts) != 4:
            continue
        package = parts[0]
        module = parts[1]
        item = parts[2]
        if package == 'compas_fab':
            packmod = "{}.{}".format(package, module)
            if packmod not in stubs:
                stubs[packmod] = []
            stubs[packmod].append(item)
    return stubs
def test_compas_api_stubs(compas_fab_api, compas_fab_stubs):
    """Every public API name must have a generated documentation stub.

    Skipped (returns early) under IronPython.  A few classes and the
    deprecated JSON helpers are exempt from the check.
    """
    if compas.IPY:
        return
    exempt_classes = ['Configuration', 'PosCon3D', 'PosConCM']
    deprecated = ['read_data_from_json', 'write_data_to_json']
    for packmod, names in compas_fab_api.items():
        # Only check direct submodules of compas_fab (exactly two dotted parts).
        if len(packmod.split('.')) != 2:
            continue
        assert packmod in compas_fab_stubs
        for name in names:
            if name in exempt_classes:
                continue
            # deprecated functions
            if name in deprecated:
                continue
            assert name in compas_fab_stubs[packmod], 'missing {} in {}'.format(name, packmod)
|
#
# PySNMP MIB module RADLAN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/RADLAN-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:59:59 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint")
dot1dBasePortEntry, Timeout, MacAddress, dot1dBasePort, BridgeId = mibBuilder.importSymbols("BRIDGE-MIB", "dot1dBasePortEntry", "Timeout", "MacAddress", "dot1dBasePort", "BridgeId")
dot1xPaePortNumber, dot1xAuthSessionStatsEntry, PaeControlledPortStatus = mibBuilder.importSymbols("IEEE8021-PAE-MIB", "dot1xPaePortNumber", "dot1xAuthSessionStatsEntry", "PaeControlledPortStatus")
InterfaceIndexOrZero, ifIndex, InterfaceIndex = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero", "ifIndex", "InterfaceIndex")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
PortList, dot1qFdbId, VlanIndex = mibBuilder.importSymbols("Q-BRIDGE-MIB", "PortList", "dot1qFdbId", "VlanIndex")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ModuleIdentity, enterprises, Unsigned32, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, NotificationType, MibIdentifier, Gauge32, Integer32, Counter32, ObjectIdentity, iso, Bits, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "enterprises", "Unsigned32", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "NotificationType", "MibIdentifier", "Gauge32", "Integer32", "Counter32", "ObjectIdentity", "iso", "Bits", "TimeTicks")
Counter_32, Unsigned_32, Integer_32 = mibBuilder.importSymbols("SNMPv2-SMI-v1", "Counter-32", "Unsigned-32", "Integer-32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
DisplayString, DateAndTime, PhysAddress, TruthValue, TimeInterval, RowPointer, RowStatus = mibBuilder.importSymbols("SNMPv2-TC-v1", "DisplayString", "DateAndTime", "PhysAddress", "TruthValue", "TimeInterval", "RowPointer", "RowStatus")
class Percents(Integer32):
    """An Integer32 constrained to the range 0..100 (a percentage value)."""
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 100)
class NetNumber(OctetString):
    """A fixed-length 4-octet string (a network number)."""
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(4, 4)
    # fixedLength marks the size constraint as exact rather than a range.
    fixedLength = 4
class VlanPriority(Integer32):
    """An Integer32 constrained to 0..7 (a VLAN priority value)."""
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 7)
class RlStormCtrlRateUnit(Integer32):
    """Enumerated (1..6) unit in which a storm-control rate is expressed."""
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
    # 'precentages' [sic] — label comes from the generated MIB source;
    # renaming it would change the wire-visible enum label.
    namedValues = NamedValues(("packetsPerSecond", 1), ("bytesPerSecond", 2), ("framesPerBuffer", 3), ("precentages", 4), ("kiloBytesPerSecond", 5), ("kiloBitsPerSecond", 6))
rnd = MibIdentifier((1, 3, 6, 1, 4, 1, 89))
rndNotifications = ObjectIdentity((1, 3, 6, 1, 4, 1, 89, 0))
if mibBuilder.loadTexts: rndNotifications.setStatus('current')
if mibBuilder.loadTexts: rndNotifications.setDescription(" All the rnd notifications will reside under this branch as specified in RFC2578 'Structure of Management Information Version 2 (SMIv2)' 8.5")
rndMng = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 1))
rndDeviceParams = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 2))
rndBridgeType = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48))).clone(namedValues=NamedValues(("reb", 1), ("ceb", 2), ("ceblb", 3), ("xeb", 4), ("xeb1", 5), ("rebsx", 6), ("rtb", 7), ("ltb", 8), ("tre", 9), ("rtre", 10), ("xtb", 11), ("ete", 12), ("rete", 13), ("ielb", 30), ("leb", 31), ("openGate12", 32), ("openGate4", 33), ("ran", 34), ("itlb", 35), ("gatelinx", 36), ("openGate2", 37), ("ogRanTR", 38), ("rdapter", 39), ("ogVan", 40), ("wanGate", 41), ("ogRubE", 42), ("ogRubT", 43), ("wanGateI", 44), ("vGate4", 45), ("lre", 46), ("mrt", 47), ("vGate2", 48)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndBridgeType.setStatus('mandatory')
if mibBuilder.loadTexts: rndBridgeType.setDescription('Identification of the RND bridge type.')
rndInactiveArpTimeOut = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndInactiveArpTimeOut.setStatus('mandatory')
if mibBuilder.loadTexts: rndInactiveArpTimeOut.setDescription('This variable defines the maximum time period that can pass between ARP requests concerning an entry in the ARP table. After this time period, the entry is deleted from the table.')
rndBridgeAlarm = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 2, 3))
rndErrorDesc = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 3, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndErrorDesc.setStatus('mandatory')
if mibBuilder.loadTexts: rndErrorDesc.setDescription('A textual description of the enterprise-specific trap sent to the Network Management Station by the RND managed device.')
rndErrorSeverity = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("info", 0), ("warning", 1), ("error", 2), ("fatal-error", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndErrorSeverity.setStatus('mandatory')
if mibBuilder.loadTexts: rndErrorSeverity.setDescription('The severity type of the enterprise-specific trap sent to the Network Management Station by the RND managed device.')
rndBrgVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndBrgVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rndBrgVersion.setDescription('The bridge version.')
rndBrgFeatures = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 5), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndBrgFeatures.setStatus('mandatory')
if mibBuilder.loadTexts: rndBrgFeatures.setDescription('A bit mask that defines the features supported by a particular configuration of this network element: __________________________________________ | Byte 1|Byte 2 |Byte 3 | ....|Byte 20 | |87654321| | 87654321| |________|_______________________________| Byte1 : bit1: TX Block mask bit2: Source Routing Encapulation bit3: SNA/SDLC bit4: Frame Relay bit5: SNMP bit6: LAN Manager bit7: High Performance bit8: Translation Byte2 : bit1: DEC Router bit2: IPX Router bit3: IP Router Byte3 : bit1: Dial Up Backup bit2: COD bit3: FACS bit4: Load Balance bit5: Remote Configuration bit6: RIP 2 bit7: OSPF bit8: IPX RIP/SAP Filter Byte4 : bit1: BootP Server bit2: BootP Client bit3: Compression bit4: V25.bis bit5: ISDN bit6: CODv2 bit7: NSPF bit8: UDP Relay Byte5 bit1:VirtualLAN bit2:Static IP Multicast bit3:IP Redundancy bit4:CCM2 bit5:ISDN Bonding bit6:Backup Link Selection -- for the VAN/Rdapter ver 4.0 bit7:IP/IPX Forwarding -- for the WANgate ver 4.0 bit8:Improved COD Byte6 bit1: Server Disptacher bit2: ISDN_US -- for the VANSX/WANGATE ver 5.0 bit3: PPP bit4: IP Rip Filter -- for Vgate3 bit5: Zero Hop Routing -- for Vgate3 bit6: ISDN Japan bit7: PPP-Security Byte7 bit1: With unmanaged Switch bit2: 2 LANs bit3: OSPF Ver 2.0 bit4: FACS Ver 2.0 bit5: Multiple WEB Farm bit6: Backup Server bit7: Check Connectivity bit8: WSD multiplexing Byte8 bit1: MRT3 bit2: WSD Redundancy bit3: DHCP Server bit4: WSD Connection Limit bit5: WSD Distributed System bit6: WSD Load Report bit7: WSD super farm bit8: RadWiz leased line Byte9 bit1: PPP IP address negotiaton bit2: DNS bit3: Nat bit4: WSD Static proximity bit5: WSD Dynamic proximity bit6: WSD Proxy bit7: WSD Proximity client bit8: MAC Load balancing Byte10 bit1: Unnum Inf bit2: Power Supply redundancy bit3: MRT PPP Compression bit4: ZHR Apolo bit5: Copy port bit6: UDP Relay 2.0 bit7: NVRAM bit8: URL table Byte11 bit1: URL super farm bit2: NAT on LAN bit3: Remote Con bit4: AP5000 bit5: 
Session tracking bit6: Mirroring bit7: Alias IP bit8: CSD Nat Byte12 bit1: content check bit2: mlb virtual ip bit3: reserved RadLan bit4: csd nat exception bit5: statistics monitor bit6: reserved-for-radware ')
rndBrgLicense = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 6), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndBrgLicense.setStatus('mandatory')
if mibBuilder.loadTexts: rndBrgLicense.setDescription('This parameter is used for entering a s/w license number for a device. A separate license number is supplied for each device.')
rndIpHost = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 2, 7))
rndCommunityTable = MibTable((1, 3, 6, 1, 4, 1, 89, 2, 7, 2), )
if mibBuilder.loadTexts: rndCommunityTable.setStatus('mandatory')
if mibBuilder.loadTexts: rndCommunityTable.setDescription('The community table of the agent')
rndCommunityEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 2, 7, 2, 1), ).setIndexNames((0, "RADLAN-MIB", "rndCommunityMngStationAddr"), (1, "RADLAN-MIB", "rndCommunityString"))
if mibBuilder.loadTexts: rndCommunityEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rndCommunityEntry.setDescription(' The row definition for this table.')
rndCommunityMngStationAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 7, 2, 1, 1), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndCommunityMngStationAddr.setStatus('mandatory')
if mibBuilder.loadTexts: rndCommunityMngStationAddr.setDescription('The management station that will be allowed to communicate with the agent IP address')
rndCommunityString = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 7, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndCommunityString.setStatus('mandatory')
if mibBuilder.loadTexts: rndCommunityString.setDescription('The community string with which the management station will communicate with the agent')
rndCommunityAccess = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 7, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("readOnly", 1), ("readWrite", 2), ("super", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndCommunityAccess.setStatus('mandatory')
if mibBuilder.loadTexts: rndCommunityAccess.setDescription('The allowed access to this management station')
rndCommunityTrapsEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 7, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("snmpV1", 1), ("snmpV2", 2), ("snmpV3", 3), ("trapsDisable", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndCommunityTrapsEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rndCommunityTrapsEnable.setDescription('Should the agent send traps to the management station, and what version is required')
rndCommunityStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 7, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("invalid", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndCommunityStatus.setStatus('mandatory')
if mibBuilder.loadTexts: rndCommunityStatus.setDescription('The status of this entry. If the status is invalid the community entry will be deleted')
rndCommunityPortSecurity = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 7, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndCommunityPortSecurity.setStatus('mandatory')
if mibBuilder.loadTexts: rndCommunityPortSecurity.setDescription('If enabled the device will only receive SNMP messages from the port, through which this NMS is reachable from the device.')
rndCommunityOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 7, 2, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndCommunityOwner.setStatus('mandatory')
if mibBuilder.loadTexts: rndCommunityOwner.setDescription('The owner of this community')
rndCommunityTrapDestPort = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 7, 2, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(162)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndCommunityTrapDestPort.setStatus('mandatory')
if mibBuilder.loadTexts: rndCommunityTrapDestPort.setDescription('The transport protocol (usually UDP) port to which traps to the management station represebted by this entry will be sent. The default is the well-known IANA assigned port number for SNMP traps. This object is relevant only if rndCommunityTrapsEnable has a value different from trapsDisable.')
rlMridTable = MibTable((1, 3, 6, 1, 4, 1, 89, 2, 7, 3), )
if mibBuilder.loadTexts: rlMridTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlMridTable.setDescription('The MRID related configurations table of the agent')
rlMridEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 2, 7, 3, 1), ).setIndexNames((0, "RADLAN-MIB", "rndCommunityMngStationAddr"), (1, "RADLAN-MIB", "rndCommunityString"))
if mibBuilder.loadTexts: rlMridEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlMridEntry.setDescription(' The row definition for this table.')
rlMridConnection = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 7, 3, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMridConnection.setStatus('mandatory')
if mibBuilder.loadTexts: rlMridConnection.setDescription('The router instance connecting the NMS who accessed the agent through the community table entry corresponding to the keys of this entry.')
rlManagedMrid = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 7, 3, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlManagedMrid.setStatus('mandatory')
if mibBuilder.loadTexts: rlManagedMrid.setDescription('The router instance currently managed by the NMS who accessed the agent through the community table entry corresponding to the keys of this entry ')
rndManagedTime = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(6, 6)).setFixedLength(6)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndManagedTime.setStatus('mandatory')
if mibBuilder.loadTexts: rndManagedTime.setDescription('The time will be sent in the format hhmmss')
rndManagedDate = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(6, 6)).setFixedLength(6)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndManagedDate.setStatus('mandatory')
if mibBuilder.loadTexts: rndManagedDate.setDescription('The date will be sent in the format ddmmyy')
rndBaseBootVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndBaseBootVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rndBaseBootVersion.setDescription('Defines the boot version of the product.')
rndIpHostManagement = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 2, 7, 4))
rndIpHostManagementSupported = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 7, 4, 1), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndIpHostManagementSupported.setStatus('mandatory')
if mibBuilder.loadTexts: rndIpHostManagementSupported.setDescription('ifindex manage supported.')
rndIpHostManagementIfIndex = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 7, 4, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndIpHostManagementIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: rndIpHostManagementIfIndex.setDescription('if supported manage , indicate ifindex, if 0 thaen disable')
genGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 2, 11))
genGroupHWVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 11, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: genGroupHWVersion.setStatus('mandatory')
if mibBuilder.loadTexts: genGroupHWVersion.setDescription('Defines the HW version of the product.')
genGroupConfigurationSymbol = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 11, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: genGroupConfigurationSymbol.setStatus('mandatory')
if mibBuilder.loadTexts: genGroupConfigurationSymbol.setDescription('Defines the Configuration Symbol attached to any hardware module manufactured by LANNET. One single character A..Z defines the CS version.')
genGroupHWStatus = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 11, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 255))).clone(namedValues=NamedValues(("ok", 1), ("hardwareProblems", 2), ("notSupported", 255)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: genGroupHWStatus.setStatus('mandatory')
if mibBuilder.loadTexts: genGroupHWStatus.setDescription('This attribute describes the status of the group hardware as detected by the sensors software.')
rndBasePhysicalAddress = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 12), PhysAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndBasePhysicalAddress.setStatus('mandatory')
if mibBuilder.loadTexts: rndBasePhysicalAddress.setDescription('The Base physical (MAC) address of the device.')
rndSoftwareFile = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 2, 13))
rndActiveSoftwareFileTable = MibTable((1, 3, 6, 1, 4, 1, 89, 2, 13, 1), )
if mibBuilder.loadTexts: rndActiveSoftwareFileTable.setStatus('mandatory')
if mibBuilder.loadTexts: rndActiveSoftwareFileTable.setDescription(' The (conceptual) table listing the active software file of the requested unit.')
rndActiveSoftwareFileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 2, 13, 1, 1), ).setIndexNames((0, "RADLAN-MIB", "rndUnitNumber"))
if mibBuilder.loadTexts: rndActiveSoftwareFileEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rndActiveSoftwareFileEntry.setDescription(' An entry (conceptual row) in the rndActiveSoftwareFileTable.')
rndUnitNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 13, 1, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndUnitNumber.setStatus('mandatory')
if mibBuilder.loadTexts: rndUnitNumber.setDescription("The unit number (for stackable devices) or 1 for 'stand alone' device.")
rndActiveSoftwareFile = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 13, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("image1", 1), ("image2", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndActiveSoftwareFile.setStatus('mandatory')
if mibBuilder.loadTexts: rndActiveSoftwareFile.setDescription('Indicates the current active software file, image1 or image2.')
rndActiveSoftwareFileAfterReset = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 13, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("image1", 1), ("image2", 2), ("invalidImage", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndActiveSoftwareFileAfterReset.setStatus('mandatory')
if mibBuilder.loadTexts: rndActiveSoftwareFileAfterReset.setDescription("Indicates the software file that will be active after reset (image1 or image2). If an error occurred in the download process, resulting in the corruption of the single software file, The value 'invalidImage' will be returned. This value can not be set by the user.")
rndImageSize = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndImageSize.setStatus('mandatory')
if mibBuilder.loadTexts: rndImageSize.setDescription('Max number of sectors currently allocated for image(s).')
rndBackupConfigurationEnabled = MibScalar((1, 3, 6, 1, 4, 1, 89, 2, 15), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndBackupConfigurationEnabled.setStatus('mandatory')
if mibBuilder.loadTexts: rndBackupConfigurationEnabled.setDescription('Specifies whether the device supports backup-config parameters in lcli commands.')
rndImageInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 2, 16))
rndImageInfoTable = MibTable((1, 3, 6, 1, 4, 1, 89, 2, 16, 1), )
if mibBuilder.loadTexts: rndImageInfoTable.setStatus('mandatory')
if mibBuilder.loadTexts: rndImageInfoTable.setDescription(' The table contains information about images.')
rndImageInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 2, 16, 1, 1), ).setIndexNames((0, "RADLAN-MIB", "rndStackUnitNumber"))
if mibBuilder.loadTexts: rndImageInfoEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rndImageInfoEntry.setDescription(' An entry in the rndImageInfoTable.')
rndStackUnitNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 16, 1, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndStackUnitNumber.setStatus('mandatory')
# --- rndImage file table columns (1.3.6.1.4.1.89.2.16.1.1.x) ---
# Read-only per-unit firmware inventory: file name, version, compilation
# date and time for image-1 and image-2. Generated from RADLAN-MIB; do not
# hand-edit OIDs or description strings.
if mibBuilder.loadTexts: rndStackUnitNumber.setDescription("The unit number (for stackable devices) or 1 for 'stand alone' device.")
rndImage1Name = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 16, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndImage1Name.setStatus('mandatory')
if mibBuilder.loadTexts: rndImage1Name.setDescription('Indicates the file name of image-1 in the system.')
rndImage2Name = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 16, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndImage2Name.setStatus('mandatory')
if mibBuilder.loadTexts: rndImage2Name.setDescription("Indicates the file name of image-2 (if present) in the system.If image-2 is not present show 'no info' text")
rndImage1Version = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 16, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndImage1Version.setStatus('mandatory')
if mibBuilder.loadTexts: rndImage1Version.setDescription('Indicates the version of image-1 in the system.')
rndImage2Version = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 16, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndImage2Version.setStatus('mandatory')
if mibBuilder.loadTexts: rndImage2Version.setDescription("Indicates the version of image-2 (if present) in the system.If image-2 is not present show 'no info' text")
rndImage1Date = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 16, 1, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndImage1Date.setStatus('mandatory')
if mibBuilder.loadTexts: rndImage1Date.setDescription('Indicates the compilation date of image-1 in the system.')
rndImage2Date = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 16, 1, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndImage2Date.setStatus('mandatory')
if mibBuilder.loadTexts: rndImage2Date.setDescription("Indicates the compilation date of image-2 (if present) in the system.If image-2 is not present show 'no info' text")
rndImage1Time = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 16, 1, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndImage1Time.setStatus('mandatory')
if mibBuilder.loadTexts: rndImage1Time.setDescription('Indicates the compilation time of image-1 in the system.')
rndImage2Time = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 2, 16, 1, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndImage2Time.setStatus('mandatory')
if mibBuilder.loadTexts: rndImage2Time.setDescription("Indicates the compilation time of image-2 (if present) in the system.If image-2 is not present show 'no info' text")
# --- rndBootP subtree (1.3.6.1.4.1.89.24) ---
# BootP relay configuration: relaying is active only when the server
# address is non-zero; the SECS threshold lets local servers answer first.
rndBootP = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 24))
rndBootPServerAddress = MibScalar((1, 3, 6, 1, 4, 1, 89, 24, 1), IpAddress().clone(hexValue="00000000")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndBootPServerAddress.setStatus('mandatory')
if mibBuilder.loadTexts: rndBootPServerAddress.setDescription(' The IPAddress of the BOOTP server. the OpenGate will act as a BootP relay if this parameter is not equale to 0.0.0.0.')
rndBootPRelaySecThreshold = MibScalar((1, 3, 6, 1, 4, 1, 89, 24, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rndBootPRelaySecThreshold.setStatus('mandatory')
if mibBuilder.loadTexts: rndBootPRelaySecThreshold.setDescription('BootP requests are relayed only if their SEC field is greater or equal to the threshold value in order to allow local BootP Servers to answer first.')
# --- Top-level enterprise subtree anchors (1.3.6.1.4.1.89.x) ---
# Bare MibIdentifier nodes; their child objects are defined elsewhere in
# this module (or in companion RADLAN modules).
ipSpec = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 26))
rsTunning = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 29))
rndApplications = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 35))
rsUDP = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 42))
swInterfaces = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 43))
rlIPmulticast = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 46))
rlFFT = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 47))
vlan = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 48))
rlRmonControl = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 49))
rlBrgMacSwitch = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 50))
# --- rlBrgMacSwitch scalars (1.3.6.1.4.1.89.50.x) ---
# Bridge MAC-switch capabilities and tuning: table limits, aging/deletion
# mode, key format, boundary thresholds, and hash-full trap controls.
rlBrgMacSwVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 50, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlBrgMacSwVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rlBrgMacSwVersion.setDescription("MIB's version, the current version is 2.")
rlBrgMacSwMaxTableNumber = MibScalar((1, 3, 6, 1, 4, 1, 89, 50, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlBrgMacSwMaxTableNumber.setStatus('mandatory')
if mibBuilder.loadTexts: rlBrgMacSwMaxTableNumber.setDescription('Maximum number of MAC Tables supported by the device.')
rlBrgMacSwDynamicTables = MibScalar((1, 3, 6, 1, 4, 1, 89, 50, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("supported", 1), ("unsupported", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlBrgMacSwDynamicTables.setStatus('mandatory')
if mibBuilder.loadTexts: rlBrgMacSwDynamicTables.setDescription('Specifies whether the device supports port dynamic MAC tables.')
# NOTE(review): OID .50.4 is skipped here (jump from .3 to .5) — presumably
# deprecated/absent in the source MIB; confirm against RADLAN-MIB.
rlBrgMacSwOldEntryDeleteMode = MibScalar((1, 3, 6, 1, 4, 1, 89, 50, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("refreshFlag", 1), ("agingFlag", 2), ("agingTime", 3), ("boundaries", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlBrgMacSwOldEntryDeleteMode.setStatus('mandatory')
if mibBuilder.loadTexts: rlBrgMacSwOldEntryDeleteMode.setDescription("The method used to delete old unused entries refresh_flag: The high level sets a flag when it adds or updates an entry, the Switch clears the flag each time, when it learns the station once more. aging_flag: The high level clears a flag when it adds or updates an entry, the Switch sets the flag each time, when it learns the station once more. aging_time: The high level defines a timeout, the Switch sends a message to the high level each time when the station doesn't send messages during interval between timeout and 2*timeout. Boundaries: The Switch doesn't support any mechanism, and the high level uses two boundaries: red and yellow. When the number of entries exceeds the red boundary, the high level deletes the oldest entries.")
rlBrgMacSwSpanningTree = MibScalar((1, 3, 6, 1, 4, 1, 89, 50, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("supported", 1), ("unsupported", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlBrgMacSwSpanningTree.setStatus('mandatory')
if mibBuilder.loadTexts: rlBrgMacSwSpanningTree.setDescription('Specifies whether the device supports port the Spanning Tree Protocol.')
rlBrgMacSwKeyType = MibScalar((1, 3, 6, 1, 4, 1, 89, 50, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("macOnly", 1), ("tagAndMac", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlBrgMacSwKeyType.setStatus('mandatory')
if mibBuilder.loadTexts: rlBrgMacSwKeyType.setDescription('Specified the used MAC Address Table kye format: macOnly: One global MAC Address Table per device (leaky VLANs). tagAndMac: MAC Address Table per each VLAN (strick VLANs).')
rlBrgMacSwYellowBoundary = MibScalar((1, 3, 6, 1, 4, 1, 89, 50, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlBrgMacSwYellowBoundary.setStatus('mandatory')
if mibBuilder.loadTexts: rlBrgMacSwYellowBoundary.setDescription('The yellow boundary.')
rlBrgMacSwRedBoundary = MibScalar((1, 3, 6, 1, 4, 1, 89, 50, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlBrgMacSwRedBoundary.setStatus('mandatory')
# NOTE(review): description below says 'yellow' for the RED boundary —
# looks like a copy-paste in the source MIB; the string is kept verbatim
# because this module must mirror the compiled MIB. Verify upstream.
if mibBuilder.loadTexts: rlBrgMacSwRedBoundary.setDescription('The yellow boundary.')
rlBrgMacSwTrapEnable = MibScalar((1, 3, 6, 1, 4, 1, 89, 50, 10), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlBrgMacSwTrapEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rlBrgMacSwTrapEnable.setDescription('This variable indicates whether to create a SNMP trap if adding dynamic mac failed due to full hash chain.')
rlBrgMacSwOperTrapCount = MibScalar((1, 3, 6, 1, 4, 1, 89, 50, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlBrgMacSwOperTrapCount.setStatus('mandatory')
if mibBuilder.loadTexts: rlBrgMacSwOperTrapCount.setDescription("This variable indicates the trap counter status (i.e. number of received MAC addresses since the last trap sent that haven't been inserted into the hash). It is relevant only when trap is enabled.")
rlBrgMacSwAdminTrapFrequency = MibScalar((1, 3, 6, 1, 4, 1, 89, 50, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 86400)).clone(60)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlBrgMacSwAdminTrapFrequency.setStatus('mandatory')
if mibBuilder.loadTexts: rlBrgMacSwAdminTrapFrequency.setDescription('This variable indicates the minimal frequency (in seconds) of sending a trap. It is relevant only when trap is enabled.')
# --- rlExperience anchor and rlCli subtree (1.3.6.1.4.1.89.51-52) ---
# CLI management: password, inactivity timer, and file-mode enable flags
# (current and after-reset variants).
rlExperience = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 51))
rlCli = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 52))
rlCliMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 52, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlCliMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rlCliMibVersion.setDescription("MIB's version, the current version is 1.")
rlCliPassword = MibScalar((1, 3, 6, 1, 4, 1, 89, 52, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlCliPassword.setStatus('mandatory')
if mibBuilder.loadTexts: rlCliPassword.setDescription('CLI Password')
rlCliTimer = MibScalar((1, 3, 6, 1, 4, 1, 89, 52, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 3600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlCliTimer.setStatus('mandatory')
if mibBuilder.loadTexts: rlCliTimer.setDescription('CLI Timer')
rlCliFileEnable = MibScalar((1, 3, 6, 1, 4, 1, 89, 52, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlCliFileEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rlCliFileEnable.setDescription('CLI File Enable/Disable')
rlCliFileEnableAfterReset = MibScalar((1, 3, 6, 1, 4, 1, 89, 52, 5), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlCliFileEnableAfterReset.setStatus('mandatory')
if mibBuilder.loadTexts: rlCliFileEnableAfterReset.setDescription('CLI File Enable/Disable After Reset')
# --- More subtree anchors (.53-.57) and rldot1dPriority (.57.1) ---
# The priority port-group table maps each dot1dBasePort to a group; all
# ports in one group share the same User Priority -> Traffic Class mapping.
rlPhysicalDescription = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 53))
rlIfInterfaces = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 54))
rlMacMulticast = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 55))
rlGalileo = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 56))
rlpBridgeMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 57))
rldot1dPriority = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 57, 1))
rldot1dPriorityMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dPriorityMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dPriorityMibVersion.setDescription("MIB's version, the current version is 1.")
rldot1dPriorityPortGroupTable = MibTable((1, 3, 6, 1, 4, 1, 89, 57, 1, 2), )
if mibBuilder.loadTexts: rldot1dPriorityPortGroupTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dPriorityPortGroupTable.setDescription('A list of PortGroupNumber for each port.')
# Indexed by dot1dBasePort imported from the standard BRIDGE-MIB.
rldot1dPriorityPortGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 57, 1, 2, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: rldot1dPriorityPortGroupEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dPriorityPortGroupEntry.setDescription('All ports belonging to a same group have the same User Priority to Traffic Class mapping.')
rldot1dPriorityPortGroupNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dPriorityPortGroupNumber.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dPriorityPortGroupNumber.setDescription('A group, that port belongs to. All ports belonging to a same group have the same User Priority to Traffic Class mapping.')
# --- rldot1dStp subtree (1.3.6.1.4.1.89.57.2): global STP scalars ---
# Device-wide spanning-tree mode/enable flags, followed by the per-VLAN
# STP status table (.57.2.6), which mirrors the standard dot1dStp group
# from RFC 1493 / IEEE 802.1D on a per-VLAN basis.
rldot1dStp = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 57, 2))
rldot1dStpMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpMibVersion.setDescription("MIB's version, the current version is 2.")
rldot1dStpType = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 4))).clone(namedValues=NamedValues(("perDevice", 1), ("mstp", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpType.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpType.setDescription('Specifies whether the device supports Spanning Tree per device, or per group.')
rldot1dStpEnable = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 3), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpEnable.setDescription('Enable / Disable spanning tree. When working in per vlan mode enable / disable STP per all vlans.')
rldot1dStpPortMustBelongToVlan = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 4), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpPortMustBelongToVlan.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortMustBelongToVlan.setDescription('Specifies whether a port must belong to a VLAN in order to participate in the STP.')
rldot1dStpExtendedPortNumberFormat = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 5), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpExtendedPortNumberFormat.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpExtendedPortNumberFormat.setDescription('Specifies whether the STP uses the extended port fnumber format.')
# --- Per-VLAN STP table (.57.2.6), indexed by rldot1dStpVlan ---
rldot1dStpVlanTable = MibTable((1, 3, 6, 1, 4, 1, 89, 57, 2, 6), )
if mibBuilder.loadTexts: rldot1dStpVlanTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanTable.setDescription('A table that contains vlan-specific information for the Spanning Tree Protocol.')
rldot1dStpVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 57, 2, 6, 1), ).setIndexNames((0, "RADLAN-MIB", "rldot1dStpVlan"))
if mibBuilder.loadTexts: rldot1dStpVlanEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanEntry.setDescription('A list of information maintained by every Vlan about the Spanning Tree Protocol state for that Vlan.')
rldot1dStpVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 6, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpVlan.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlan.setDescription('The Vlan index.')
rldot1dStpVlanEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 6, 1, 2), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpVlanEnable.setReference(' ?? ')
if mibBuilder.loadTexts: rldot1dStpVlanEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanEnable.setDescription('Specifies whether this vlan is STP enable or disable')
rldot1dStpTimeSinceTopologyChange = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 6, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpTimeSinceTopologyChange.setReference('IEEE 802.1D-1990: Section 6.8.1.1.3')
if mibBuilder.loadTexts: rldot1dStpTimeSinceTopologyChange.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpTimeSinceTopologyChange.setDescription('The time (in hundredths of a second) since the last time a topology change was detected by the bridge entity.')
rldot1dStpTopChanges = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 6, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpTopChanges.setReference('IEEE 802.1D-1990: Section 6.8.1.1.3')
if mibBuilder.loadTexts: rldot1dStpTopChanges.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpTopChanges.setDescription('The total number of topology changes detected by this bridge since the management entity was last reset or initialized.')
rldot1dStpDesignatedRoot = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 6, 1, 5), BridgeId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpDesignatedRoot.setReference('IEEE 802.1D-1990: Section 4.5.3.1')
if mibBuilder.loadTexts: rldot1dStpDesignatedRoot.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpDesignatedRoot.setDescription('The bridge identifier of the root of the spanning tree as determined by the Spanning Tree Protocol as executed by this node. This value is used as the Root Identifier parameter in all Configuration Bridge PDUs originated by this node.')
rldot1dStpRootCost = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 6, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpRootCost.setReference('IEEE 802.1D-1990: Section 4.5.3.2')
if mibBuilder.loadTexts: rldot1dStpRootCost.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpRootCost.setDescription('The cost of the path to the root as seen from this bridge.')
rldot1dStpRootPort = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 6, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpRootPort.setReference('IEEE 802.1D-1990: Section 4.5.3.3')
if mibBuilder.loadTexts: rldot1dStpRootPort.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpRootPort.setDescription('The port number of the port which offers the lowest cost path from this bridge to the root bridge.')
rldot1dStpMaxAge = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 6, 1, 8), Timeout()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpMaxAge.setReference('IEEE 802.1D-1990: Section 4.5.3.4')
if mibBuilder.loadTexts: rldot1dStpMaxAge.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpMaxAge.setDescription('The maximum age of Spanning Tree Protocol information learned from the network on any port before it is discarded, in units of hundredths of a second. This is the actual value that this bridge is currently using.')
rldot1dStpHelloTime = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 6, 1, 9), Timeout()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpHelloTime.setReference('IEEE 802.1D-1990: Section 4.5.3.5')
if mibBuilder.loadTexts: rldot1dStpHelloTime.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpHelloTime.setDescription('The amount of time between the transmission of Configuration bridge PDUs by this node on any port when it is the root of the spanning tree or trying to become so, in units of hundredths of a second. This is the actual value that this bridge is currently using.')
rldot1dStpHoldTime = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 6, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpHoldTime.setReference('IEEE 802.1D-1990: Section 4.5.3.14')
if mibBuilder.loadTexts: rldot1dStpHoldTime.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpHoldTime.setDescription('This time value determines the interval length during which no more than two Configuration bridge PDUs shall be transmitted by this node, in units of hundredths of a second.')
rldot1dStpForwardDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 6, 1, 11), Timeout()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpForwardDelay.setReference('IEEE 802.1D-1990: Section 4.5.3.6')
if mibBuilder.loadTexts: rldot1dStpForwardDelay.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpForwardDelay.setDescription('This time value, measured in units of hundredths of a second, controls how fast a port changes its spanning state when moving towards the Forwarding state. The value determines how long the port stays in each of the Listening and Learning states, which precede the Forwarding state. This value is also used, when a topology change has been detected and is underway, to age all dynamic entries in the Forwarding Database. [Note that this value is the one that this bridge is currently using, in contrast to dot1dStpBridgeForwardDelay which is the value that this bridge and all others would start using if/when this bridge were to become the root.]')
# --- rldot1dStpVlanPort table (.57.2.7) ---
# Per-<vlan, port> STP state, indexed by (rldot1dStpVlanPortVlan,
# rldot1dStpVlanPortPort); columns mirror the standard dot1dStpPortTable
# (RFC 1493) extended with a VLAN dimension.
rldot1dStpVlanPortTable = MibTable((1, 3, 6, 1, 4, 1, 89, 57, 2, 7), )
if mibBuilder.loadTexts: rldot1dStpVlanPortTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanPortTable.setDescription('A table that contains pair <vlan, port> specific information for the Spanning Tree Protocol.')
rldot1dStpVlanPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 57, 2, 7, 1), ).setIndexNames((0, "RADLAN-MIB", "rldot1dStpVlanPortVlan"), (0, "RADLAN-MIB", "rldot1dStpVlanPortPort"))
if mibBuilder.loadTexts: rldot1dStpVlanPortEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanPortEntry.setDescription('A list of information maintained by every pair <vlan, port> about the Spanning Tree Protocol state for that pair.')
rldot1dStpVlanPortVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 7, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpVlanPortVlan.setReference('IEEE 802.1s/D2-1999 ')
if mibBuilder.loadTexts: rldot1dStpVlanPortVlan.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanPortVlan.setDescription('The Vlan number that the port belongs to, and for which this entry contains Spanning Tree Protocol management information.')
rldot1dStpVlanPortPort = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 7, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpVlanPortPort.setReference('IEEE 802.1t/D2-1999: Section 9.2.6')
if mibBuilder.loadTexts: rldot1dStpVlanPortPort.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanPortPort.setDescription('The port number of the port for which this entry contains Spanning Tree Protocol management information.')
rldot1dStpVlanPortPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 7, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpVlanPortPriority.setReference('IEEE 802.1t/D2-1999: Section 9.2.6')
if mibBuilder.loadTexts: rldot1dStpVlanPortPriority.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanPortPriority.setDescription('The value of the priority field which is contained in the more significant 4 bits of the most significant octet of the (2 octet long) Port ID. The other octet of the Port ID is given by the value of rldot1dStpVlanPort.')
rldot1dStpVlanPortState = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 7, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("disabled", 1), ("blocking", 2), ("listening", 3), ("learning", 4), ("forwarding", 5), ("broken", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpVlanPortState.setReference('IEEE 802.1D-1990: Section 4.5.5.2')
if mibBuilder.loadTexts: rldot1dStpVlanPortState.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanPortState.setDescription("The port's current state as defined by application of the Spanning Tree Protocol. This state controls what action a port takes on reception of a frame. If the bridge has detected a port that is malfunctioning it will place that port into the broken(6) state. For ports which are disabled (see dot1dStpVlanPortEnable), this object will have a value of disabled(1).")
rldot1dStpVlanPortEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 7, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpVlanPortEnable.setReference('IEEE 802.1D-1990: Section 4.5.5.2')
if mibBuilder.loadTexts: rldot1dStpVlanPortEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanPortEnable.setDescription('The enabled/disabled status of the port.')
rldot1dStpVlanPortPathCost = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 7, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpVlanPortPathCost.setReference('IEEE 802.1D-1990: Section 4.5.5.3')
if mibBuilder.loadTexts: rldot1dStpVlanPortPathCost.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanPortPathCost.setDescription('The contribution of this port to the path cost of paths towards the spanning tree root which include this port. 802.1D-1990 recommends that the default value of this parameter be in inverse proportion to the speed of the attached LAN.')
rldot1dStpVlanPortDesignatedRoot = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 7, 1, 7), BridgeId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpVlanPortDesignatedRoot.setReference('IEEE 802.1D-1990: Section 4.5.5.4')
if mibBuilder.loadTexts: rldot1dStpVlanPortDesignatedRoot.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanPortDesignatedRoot.setDescription('The unique Bridge Identifier of the Bridge recorded as the Root in the Configuration BPDUs transmitted by the Designated Bridge for the segment to which the port is attached.')
rldot1dStpVlanPortDesignatedCost = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 7, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpVlanPortDesignatedCost.setReference('IEEE 802.1D-1990: Section 4.5.5.5')
if mibBuilder.loadTexts: rldot1dStpVlanPortDesignatedCost.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanPortDesignatedCost.setDescription('The path cost of the Designated Port of the segment connected to this port. This value is compared to the Root Path Cost field in received bridge PDUs.')
rldot1dStpVlanPortDesignatedBridge = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 7, 1, 9), BridgeId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpVlanPortDesignatedBridge.setReference('IEEE 802.1D-1990: Section 4.5.5.6')
if mibBuilder.loadTexts: rldot1dStpVlanPortDesignatedBridge.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanPortDesignatedBridge.setDescription("The Bridge Identifier of the bridge which this port considers to be the Designated Bridge for this port's segment.")
rldot1dStpVlanPortDesignatedPort = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 7, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpVlanPortDesignatedPort.setReference('IEEE 802.1D-1990: Section 4.5.5.7')
if mibBuilder.loadTexts: rldot1dStpVlanPortDesignatedPort.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanPortDesignatedPort.setDescription("The Port Identifier of the port on the Designated Bridge for this port's segment.")
rldot1dStpVlanPortForwardTransitions = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 7, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpVlanPortForwardTransitions.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpVlanPortForwardTransitions.setDescription('The number of times this port has transitioned from the Learning state to the Forwarding state.')
# --- STP trap variables (.57.2.8) and mode/monitor scalars (.57.2.9-12) ---
# Trap payload objects (port ifIndex, VLAN ID) plus after-reset STP mode
# and dampening parameters (monitor-time factor, BPDU stability count).
rldot1dStpTrapVariable = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 57, 2, 8))
rldot1dStpTrapVrblifIndex = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 8, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpTrapVrblifIndex.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpTrapVrblifIndex.setDescription('The ifIndex of port which STP status was changed')
rldot1dStpTrapVrblVID = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 8, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpTrapVrblVID.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpTrapVrblVID.setDescription('The VID of VLAN to which the port belongs which STP status was changed')
rldot1dStpTypeAfterReset = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 4))).clone(namedValues=NamedValues(("perDevice", 1), ("mstp", 4))).clone('perDevice')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpTypeAfterReset.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpTypeAfterReset.setDescription('New mode of spaning tree supported by the device after the next reset.')
rldot1dStpMonitorTime = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 20)).clone(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpMonitorTime.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpMonitorTime.setDescription('Factor of hello-time during which a port is monotored to determine if it is stable.')
rldot1dStpBpduCount = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 60)).clone(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpBpduCount.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpBpduCount.setDescription('The number of bpdu that need to received for the link to be considered stable.')
rldot1dStpLastChanged = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 12), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpLastChanged.setReference('')
if mibBuilder.loadTexts: rldot1dStpLastChanged.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpLastChanged.setDescription('The last time any object in this table was changed by SNMP or other local management means.')
# --- rldot1dStpPort table (.57.2.13) ---
# Extended per-port STP data, indexed by rldot1dStpPortPort: BPDU
# dampening/filtering flags, BPDU counters, RSTP port role and BPDU type,
# restricted-role / auto-edge flags and loopback detection status.
rldot1dStpPortTable = MibTable((1, 3, 6, 1, 4, 1, 89, 57, 2, 13), )
if mibBuilder.loadTexts: rldot1dStpPortTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortTable.setDescription('A table that contains extended pair port specific information.')
rldot1dStpPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 57, 2, 13, 1), ).setIndexNames((0, "RADLAN-MIB", "rldot1dStpPortPort"))
if mibBuilder.loadTexts: rldot1dStpPortEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortEntry.setDescription('A list of information maintained by every port about the Spanning Tree Protocol state for that port.')
rldot1dStpPortPort = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 13, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpPortPort.setReference('IEEE 802.1t/D2-1999: Section 9.2.6')
if mibBuilder.loadTexts: rldot1dStpPortPort.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortPort.setDescription('The port number of the port for which this entry contains Spanning Tree Protocol management information.')
rldot1dStpPortDampEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 13, 1, 2), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpPortDampEnable.setReference('')
if mibBuilder.loadTexts: rldot1dStpPortDampEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortDampEnable.setDescription('Specified if dampening is enabled on this port.')
rldot1dStpPortDampStable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 13, 1, 3), TruthValue().clone('true')).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpPortDampStable.setReference('')
if mibBuilder.loadTexts: rldot1dStpPortDampStable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortDampStable.setDescription('Specified if the port is stable.')
rldot1dStpPortFilterBpdu = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 13, 1, 4), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpPortFilterBpdu.setReference('')
if mibBuilder.loadTexts: rldot1dStpPortFilterBpdu.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortFilterBpdu.setDescription('Specified if this port should filter bpdus when stp is disabled.')
# NOTE(review): 'Counter_32' below differs from the 'Counter32' symbol used
# earlier (e.g. rldot1dStpTopChanges). The module's import block is outside
# this chunk — confirm 'Counter_32' is actually bound by importSymbols,
# otherwise these two lines would raise NameError at load time.
rldot1dStpPortBpduSent = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 13, 1, 5), Counter_32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpPortBpduSent.setReference('')
if mibBuilder.loadTexts: rldot1dStpPortBpduSent.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortBpduSent.setDescription('Specified the number of bpdu sent from this port.')
rldot1dStpPortBpduReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 13, 1, 6), Counter_32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpPortBpduReceived.setReference('')
if mibBuilder.loadTexts: rldot1dStpPortBpduReceived.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortBpduReceived.setDescription('Specified the number of bpdu received in this port.')
rldot1dStpPortRole = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 13, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 0), ("disabled", 1), ("alternate", 2), ("backup", 3), ("root", 4), ("designated", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpPortRole.setReference('')
if mibBuilder.loadTexts: rldot1dStpPortRole.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortRole.setDescription('Specified the role of this this port.')
rldot1dStpBpduType = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 13, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("stp", 0), ("rstp", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpBpduType.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpBpduType.setDescription('Specified the type of BPDU transmitted by this port.')
rldot1dStpPortRestrictedRole = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 13, 1, 9), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpPortRestrictedRole.setReference('IEEE 802.1ad-D3-1: Section 13.24.29')
if mibBuilder.loadTexts: rldot1dStpPortRestrictedRole.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortRestrictedRole.setDescription('If TRUE causes the Port not to be selected as Root Port for the CIST or any MSTI, even if it has the best spanning tree priority vector. Such a Port will be selected as an Alternate Port after the Root Port has been selected.')
rldot1dStpPortAutoEdgePort = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 13, 1, 10), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpPortAutoEdgePort.setReference('IEEE 802.1D-2004: Section 17.13.3')
if mibBuilder.loadTexts: rldot1dStpPortAutoEdgePort.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortAutoEdgePort.setDescription("If TRUE causes the Port when become up, to enter the blocking state, and if during 3 seconds it doesn't receive a BPDU, it will enter the forwarding state.")
rldot1dStpPortLoopback = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 13, 1, 11), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpPortLoopback.setReference('')
if mibBuilder.loadTexts: rldot1dStpPortLoopback.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortLoopback.setDescription('Specified loopback was detected on port.')
rldot1dStpPortsEnable = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 14), TruthValue().clone('true')).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpPortsEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortsEnable.setDescription('Enable / Disable spanning tree on ports by default .')
rldot1dStpTaggedFlooding = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 15), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpTaggedFlooding.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpTaggedFlooding.setDescription('flooding can be done in tagged bpdu .')
rldot1dStpPortBelongToVlanDefault = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 16), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpPortBelongToVlanDefault.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortBelongToVlanDefault.setDescription('The default value of rldot1dStpPortMustBelongToVlan .')
rldot1dStpEnableByDefault = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 17), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpEnableByDefault.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpEnableByDefault.setDescription('The default value of rldot1dStpEnable .')
rldot1dStpPortToDefault = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 18), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpPortToDefault.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortToDefault.setDescription('To order port/s to revert to default setings .')
rldot1dStpSupportedType = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("perDevice", 1), ("perVlan", 2), ("mstp", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpSupportedType.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpSupportedType.setDescription('The type of stp supported by the device.')
rldot1dStpEdgeportSupportInStp = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 20), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dStpEdgeportSupportInStp.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpEdgeportSupportInStp.setDescription('If EdgePort is supported in stpCompatible mode .')
rldot1dStpFilterBpdu = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 21), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpFilterBpdu.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpFilterBpdu.setDescription('Specified if the device should filter BPDUs when STP is disabled.')
rldot1dStpFloodBpduMethod = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("classic", 0), ("bridging", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpFloodBpduMethod.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpFloodBpduMethod.setDescription('Specified flooding method: 1 - classic - untagged to all stp disabled ports 2 - bridging -normal bridging.')
rldot1dStpSeparatedBridges = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 57, 2, 23))
rldot1dStpPortBpduGuardTable = MibTable((1, 3, 6, 1, 4, 1, 89, 57, 2, 24), )
if mibBuilder.loadTexts: rldot1dStpPortBpduGuardTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortBpduGuardTable.setDescription('A table that contains for each port whether it is bpdu guard .')
rldot1dStpPortBpduGuardEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 57, 2, 24, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: rldot1dStpPortBpduGuardEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortBpduGuardEntry.setDescription('A list of information maintained by every port whether it is bpdu guard.')
rldot1dStpPortBpduGuardEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 24, 1, 1), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpPortBpduGuardEnable.setReference('')
if mibBuilder.loadTexts: rldot1dStpPortBpduGuardEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpPortBpduGuardEnable.setDescription('Specified if bpdu guard is enabled on this port.')
rldot1dStpSeparatedBridgesTable = MibTable((1, 3, 6, 1, 4, 1, 89, 57, 2, 23, 1), )
if mibBuilder.loadTexts: rldot1dStpSeparatedBridgesTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpSeparatedBridgesTable.setDescription('Define if separated bridges feature is enabled for each interface.')
rldot1dStpSeparatedBridgesEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 57, 2, 23, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: rldot1dStpSeparatedBridgesEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpSeparatedBridgesEntry.setDescription('Defines the contents of each line in the rlSeparatedBridgesTable table.')
rldot1dStpSeparatedBridgesPortEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 2, 23, 1, 1, 1), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpSeparatedBridgesPortEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpSeparatedBridgesPortEnable.setDescription('This variable indicates whether the separated bridge feature is enabled on a specified ifIndex.')
rldot1dStpSeparatedBridgesEnable = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 23, 2), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpSeparatedBridgesEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpSeparatedBridgesEnable.setDescription('Enable / Disable Separated Bridges Feature.')
rldot1dStpSeparatedBridgesAutoConfig = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 2, 23, 3), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1dStpSeparatedBridgesAutoConfig.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dStpSeparatedBridgesAutoConfig.setDescription('Enable / Disable Separated Bridges Automatic Configuration.')
rldot1dExtBase = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 57, 3))
rldot1dExtBaseMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 3, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dExtBaseMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dExtBaseMibVersion.setDescription("MIB's version, the current version is 1.")
rldot1dDeviceCapabilities = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 3, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dDeviceCapabilities.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dDeviceCapabilities.setDescription('Indicates the optional parts of private extension of IEEE 802.1D and 802.1Q that are implemented by this device and are manageable through this MIB. Capabilities that are allowed on a per-port basis are indicated in dot1dPortCapabilities.')
rldot1wRStp = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 57, 4))
rldot1wRStpVlanEdgePortTable = MibTable((1, 3, 6, 1, 4, 1, 89, 57, 4, 1), )
if mibBuilder.loadTexts: rldot1wRStpVlanEdgePortTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1wRStpVlanEdgePortTable.setDescription('A table that contains pair <vlan, port> specific information for the Rapid Spanning Tree Protocol.')
rldot1wRStpVlanEdgePortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 57, 4, 1, 1), ).setIndexNames((0, "RADLAN-MIB", "rldot1wRStpVlanEdgePortVlan"), (0, "RADLAN-MIB", "rldot1wRStpVlanEdgePortPort"))
if mibBuilder.loadTexts: rldot1wRStpVlanEdgePortEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1wRStpVlanEdgePortEntry.setDescription('A list of information maintained by every pair <vlan, port> about the RAPID Spanning Tree Protocol state for that pair.')
rldot1wRStpVlanEdgePortVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1wRStpVlanEdgePortVlan.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1wRStpVlanEdgePortVlan.setDescription('The Vlan number that the port belongs to, and for which this entry contains Spanning Tree Protocol management information, If STP per device then only one value of 1 is allowed. If STP per a VLAN then all value of 1..4095 are allowed.')
rldot1wRStpVlanEdgePortPort = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 4, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1wRStpVlanEdgePortPort.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1wRStpVlanEdgePortPort.setDescription('The port number of the port for which this entry contains Spanning Tree Protocol management information.')
rldot1wRStpEdgePortStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 4, 1, 1, 3), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1wRStpEdgePortStatus.setReference('IEEE 802.1wd6-2000: Section 17.13.3.1 ')
if mibBuilder.loadTexts: rldot1wRStpEdgePortStatus.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1wRStpEdgePortStatus.setDescription('Specifies whether this port is an Edge Port or not')
rldot1wRStpForceVersionTable = MibTable((1, 3, 6, 1, 4, 1, 89, 57, 4, 2), )
if mibBuilder.loadTexts: rldot1wRStpForceVersionTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1wRStpForceVersionTable.setDescription('A table that contains <vlan> specific information for the Rapid Spanning Tree Protocol.')
rldot1wRStpForceVersionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 57, 4, 2, 1), ).setIndexNames((0, "RADLAN-MIB", "rldot1wRStpForceVersionVlan"))
if mibBuilder.loadTexts: rldot1wRStpForceVersionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1wRStpForceVersionEntry.setDescription('A list of information maintained by every <vlan> about the RAPID Spanning Tree Protocol state for that pair.')
rldot1wRStpForceVersionVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 4, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1wRStpForceVersionVlan.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1wRStpForceVersionVlan.setDescription('The Vlan number that the port belongs to, and for which this entry contains Spanning Tree Protocol management information, If STP per device then only one value of 1 is allowed. If STP per a VLAN then all value of 1..4095 are allowed.')
rldot1wRStpForceVersionState = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 4, 2, 1, 2), Integer32().clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1wRStpForceVersionState.setReference('IEEE 802.1wd9-2000: Section 17.16.1 ')
if mibBuilder.loadTexts: rldot1wRStpForceVersionState.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1wRStpForceVersionState.setDescription('Specifies whether this Bridge uses the normal RSTP algorithm, or STP Compatibility algorythm: 0 - STP Compatibility 2 - Normal RSTP')
rldot1pPriorityMap = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 57, 5))
rldot1pPriorityMapState = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 5, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1pPriorityMapState.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1pPriorityMapState.setDescription('enable / disable')
rldot1pPriorityMapTable = MibTable((1, 3, 6, 1, 4, 1, 89, 57, 5, 2), )
if mibBuilder.loadTexts: rldot1pPriorityMapTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1pPriorityMapTable.setDescription('This table hold information the priority maps')
rldot1pPriorityMapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 57, 5, 2, 1), ).setIndexNames((0, "RADLAN-MIB", "rldot1pPriorityMapName"))
if mibBuilder.loadTexts: rldot1pPriorityMapEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1pPriorityMapEntry.setDescription('The row definition for this table.')
rldot1pPriorityMapName = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 5, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 25))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1pPriorityMapName.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1pPriorityMapName.setDescription('The map name')
rldot1pPriorityMapPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 5, 2, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1pPriorityMapPriority.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1pPriorityMapPriority.setDescription('The map holding the queue')
rldot1pPriorityMapPort = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 5, 2, 1, 3), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1pPriorityMapPort.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1pPriorityMapPort.setDescription('the port that the map, is applied on in config')
rldot1pPriorityMapPortList = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 5, 2, 1, 4), PortList()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1pPriorityMapPortList.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1pPriorityMapPortList.setDescription('the ports that the map, is applied on in actual')
rldot1pPriorityMapStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 5, 2, 1, 5), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1pPriorityMapStatus.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1pPriorityMapStatus.setDescription("The status of the table entry. It's used to delete an entry")
rldot1sMstp = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 57, 6))
rldot1sMstpInstanceTable = MibTable((1, 3, 6, 1, 4, 1, 89, 57, 6, 1), )
if mibBuilder.loadTexts: rldot1sMstpInstanceTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstanceTable.setDescription('A table that contains Mstp instance specific information for the Multiple Spanning Tree Protocol.')
rldot1sMstpInstanceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 57, 6, 1, 1), ).setIndexNames((0, "RADLAN-MIB", "rldot1sMstpInstanceId"))
if mibBuilder.loadTexts: rldot1sMstpInstanceEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstanceEntry.setDescription('A list of information maintained by every instance about the multiple Spanning Tree Protocol state for that instance.')
rldot1sMstpInstanceId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstanceId.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstanceId.setDescription('The Instance index.')
rldot1sMstpInstanceEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 1, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstanceEnable.setReference(' ?? ')
if mibBuilder.loadTexts: rldot1sMstpInstanceEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstanceEnable.setDescription('Specifies whether this Instance is STP enable or disable')
rldot1sMstpInstanceTimeSinceTopologyChange = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 1, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstanceTimeSinceTopologyChange.setReference('IEEE 802.1D-1990: Section 6.8.1.1.3')
if mibBuilder.loadTexts: rldot1sMstpInstanceTimeSinceTopologyChange.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstanceTimeSinceTopologyChange.setDescription('The time (in hundredths of a second) since the last time a topology change was detected by the Mstp Instance .')
rldot1sMstpInstanceTopChanges = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstanceTopChanges.setReference('IEEE 802.1D-1990: Section 6.8.1.1.3')
if mibBuilder.loadTexts: rldot1sMstpInstanceTopChanges.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstanceTopChanges.setDescription('The total number of topology changes detected by this Instance since the management entity was last reset or initialized.')
rldot1sMstpInstanceDesignatedRoot = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 1, 1, 5), BridgeId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstanceDesignatedRoot.setReference('IEEE 802.1D-1990: Section 4.5.3.1')
if mibBuilder.loadTexts: rldot1sMstpInstanceDesignatedRoot.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstanceDesignatedRoot.setDescription('The bridge identifier of the root of the spanning tree as determined by the Muliple Spanning Tree Protocol as executed by this node. This value is used as the Root Identifier parameter in all Configuration Bridge PDUs originated by this node.')
rldot1sMstpInstanceRootCost = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstanceRootCost.setReference('IEEE 802.1D-1990: Section 4.5.3.2')
if mibBuilder.loadTexts: rldot1sMstpInstanceRootCost.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstanceRootCost.setDescription('The cost of the path to the root as seen from this bridge.')
rldot1sMstpInstanceRootPort = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstanceRootPort.setReference('IEEE 802.1D-1990: Section 4.5.3.3')
if mibBuilder.loadTexts: rldot1sMstpInstanceRootPort.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstanceRootPort.setDescription('The port number of the port which offers the lowest cost path from this bridge to the root bridge.')
rldot1sMstpInstanceMaxAge = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 1, 1, 8), Timeout()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstanceMaxAge.setReference('IEEE 802.1D-1990: Section 4.5.3.4')
if mibBuilder.loadTexts: rldot1sMstpInstanceMaxAge.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstanceMaxAge.setDescription('The maximum age of Spanning Tree Protocol information learned from the network on any port before it is discarded, in units of hundredths of a second. This is the actual value that this bridge is currently using.')
rldot1sMstpInstanceHelloTime = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 1, 1, 9), Timeout()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstanceHelloTime.setReference('IEEE 802.1D-1990: Section 4.5.3.5')
if mibBuilder.loadTexts: rldot1sMstpInstanceHelloTime.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstanceHelloTime.setDescription('The amount of time between the transmission of Configuration bridge PDUs by this node on any port when it is the root of the spanning tree or trying to become so, in units of hundredths of a second. This is the actual value that this bridge is currently using.')
rldot1sMstpInstanceHoldTime = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 1, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstanceHoldTime.setReference('IEEE 802.1D-1990: Section 4.5.3.14')
if mibBuilder.loadTexts: rldot1sMstpInstanceHoldTime.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstanceHoldTime.setDescription('This time value determines the interval length during which no more than two Configuration bridge PDUs shall be transmitted by this node, in units of hundredths of a second.')
rldot1sMstpInstanceForwardDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 1, 1, 11), Timeout()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstanceForwardDelay.setReference('IEEE 802.1D-1990: Section 4.5.3.6')
if mibBuilder.loadTexts: rldot1sMstpInstanceForwardDelay.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstanceForwardDelay.setDescription('This time value, measured in units of hundredths of a second, controls how fast a port changes its spanning state when moving towards the Forwarding state. The value determines how long the port stays in each of the Listening and Learning states, which precede the Forwarding state. This value is also used, when a topology change has been detected and is underway, to age all dynamic entries in the Forwarding Database. [Note that this value is the one that this bridge is currently using, in contrast to dot1dStpBridgeForwardDelay which is the value that this bridge and all others would start using if/when this bridge were to become the root.]')
rldot1sMstpInstancePriority = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 1, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 61440))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1sMstpInstancePriority.setReference('IEEE 802.1S-2001: Section 13.24.2')
if mibBuilder.loadTexts: rldot1sMstpInstancePriority.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstancePriority.setDescription('The value of the write-able portion of the Bridge ID, i.e., the first four bits of the first octet of the (8 octet long) Bridge ID.The value is a product of 4096. The next 12 bit are the msti id . The other (last) 6 octets of the Bridge ID are given by the value of dot1dBaseBridgeAddress.')
rldot1sMstpInstanceRemainingHopes = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 1, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstanceRemainingHopes.setReference('IEEE 802.1D-1990: Section 4.5.3.14')
if mibBuilder.loadTexts: rldot1sMstpInstanceRemainingHopes.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstanceRemainingHopes.setDescription('This count value determines the amount of hopes the information transmited by this bridge on this instance can travel.')
rldot1sMstpInstancePortTable = MibTable((1, 3, 6, 1, 4, 1, 89, 57, 6, 2), )
if mibBuilder.loadTexts: rldot1sMstpInstancePortTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstancePortTable.setDescription('A table that contains pair <msti, port> specific information for the Spanning Tree Protocol.')
rldot1sMstpInstancePortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 57, 6, 2, 1), ).setIndexNames((0, "RADLAN-MIB", "rldot1sMstpInstancePortMstiId"), (0, "RADLAN-MIB", "rldot1sMstpInstancePortPort"))
if mibBuilder.loadTexts: rldot1sMstpInstancePortEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstancePortEntry.setDescription('A list of information maintained by every pair <msti, port> about the Spanning Tree Protocol state for that pair.')
rldot1sMstpInstancePortMstiId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstancePortMstiId.setReference('IEEE 802.1s/D11-2001 ')
if mibBuilder.loadTexts: rldot1sMstpInstancePortMstiId.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstancePortMstiId.setDescription('The Vlan group number that the port belongs to, and for which this entry contains Spanning Tree Protocol management information.')
rldot1sMstpInstancePortPort = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstancePortPort.setReference('IEEE 802.1t/D2-1999: Section 9.2.6')
if mibBuilder.loadTexts: rldot1sMstpInstancePortPort.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstancePortPort.setDescription('The port number of the port for which this entry contains Spanning Tree Protocol management information.')
rldot1sMstpInstancePortPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 240))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1sMstpInstancePortPriority.setReference('IEEE 802.1t/D2-1999: Section 9.2.6')
if mibBuilder.loadTexts: rldot1sMstpInstancePortPriority.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstancePortPriority.setDescription('The value of the priority field which is contained in the more significant 4 bits of the most significant octet of the (2 octet long) Port ID. The value is a product of 16. The other octet of the Port ID is given by the value of rldot1dStpVlanGroupPort.')
rldot1sMstpInstancePortState = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("disabled", 1), ("blocking", 2), ("listening", 3), ("learning", 4), ("forwarding", 5), ("broken", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstancePortState.setReference('IEEE 802.1D-1990: Section 4.5.5.2')
if mibBuilder.loadTexts: rldot1sMstpInstancePortState.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstancePortState.setDescription("The port's current state as defined by application of the Spanning Tree Protocol. This state controls what action a port takes on reception of a frame. If the bridge has detected a port that is malfunctioning it will place that port into the broken(6) state. For ports which are disabled (see dot1dStpVlanPortEnable), this object will have a value of disabled(1).")
rldot1sMstpInstancePortEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstancePortEnable.setReference('IEEE 802.1D-1990: Section 4.5.5.2')
if mibBuilder.loadTexts: rldot1sMstpInstancePortEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstancePortEnable.setDescription('The enabled/disabled status of the port.')
rldot1sMstpInstancePortPathCost = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 200000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstancePortPathCost.setReference('IEEE 802.1D-1990: Section 4.5.5.3')
if mibBuilder.loadTexts: rldot1sMstpInstancePortPathCost.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstancePortPathCost.setDescription('The contribution of this port to the path cost of paths towards the spanning tree root which include this port. 802.1D-1990 recommends that the default value of this parameter be in inverse proportion to the speed of the attached LAN.')
rldot1sMstpInstancePortDesignatedRoot = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 2, 1, 7), BridgeId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstancePortDesignatedRoot.setReference('IEEE 802.1D-1990: Section 4.5.5.4')
if mibBuilder.loadTexts: rldot1sMstpInstancePortDesignatedRoot.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstancePortDesignatedRoot.setDescription('The unique Bridge Identifier of the Bridge recorded as the Root in the Configuration BPDUs transmitted by the Designated Bridge for the segment to which the port is attached.')
rldot1sMstpInstancePortDesignatedCost = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 2, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstancePortDesignatedCost.setReference('IEEE 802.1D-1990: Section 4.5.5.5')
if mibBuilder.loadTexts: rldot1sMstpInstancePortDesignatedCost.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstancePortDesignatedCost.setDescription('The path cost of the Designated Port of the segment connected to this port. This value is compared to the Root Path Cost field in received bridge PDUs.')
rldot1sMstpInstancePortDesignatedBridge = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 2, 1, 9), BridgeId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstancePortDesignatedBridge.setReference('IEEE 802.1D-1990: Section 4.5.5.6')
if mibBuilder.loadTexts: rldot1sMstpInstancePortDesignatedBridge.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstancePortDesignatedBridge.setDescription("The Bridge Identifier of the bridge which this port considers to be the Designated Bridge for this port's segment.")
rldot1sMstpInstancePortDesignatedPort = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 2, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstancePortDesignatedPort.setReference('IEEE 802.1D-1990: Section 4.5.5.7')
if mibBuilder.loadTexts: rldot1sMstpInstancePortDesignatedPort.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstancePortDesignatedPort.setDescription("The Port Identifier of the port on the Designated Bridge for this port's segment.")
rldot1sMstpInstancePortForwardTransitions = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpInstancePortForwardTransitions.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpInstancePortForwardTransitions.setDescription('The number of times this port has transitioned from the Learning state to the Forwarding state.')
rldot1sMStpInstancePortAdminPathCost = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 2, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 200000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1sMStpInstancePortAdminPathCost.setReference('IEEE 802.1D-1998: Section 8.5.5.3')
if mibBuilder.loadTexts: rldot1sMStpInstancePortAdminPathCost.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMStpInstancePortAdminPathCost.setDescription("The administratively assigned value for the contribution of this port to the path cost of paths towards the spanning tree root. Writing a value of '0' assigns the automatically calculated default Path Cost value to the port. If the default Path Cost is being used, this object returns '0' when read. This complements the object dot1dStpPortPathCost, which returns the operational value of the path cost.")
rldot1sMStpInstancePortRole = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 2, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("unknown", 0), ("disabled", 1), ("alternate", 2), ("backup", 3), ("root", 4), ("designated", 5), ("master", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMStpInstancePortRole.setReference('')
if mibBuilder.loadTexts: rldot1sMStpInstancePortRole.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMStpInstancePortRole.setDescription('Specify the role of this this port.')
# --- MSTP region-wide scalars (OID ...89.57.6.3 .. 6.5) --------------------
# Read-write max-hops (1..40, default 20) governing how far an MST BPDU
# propagates inside a region.  ("Hopes" is the generator's spelling; the
# identifier must match the source MIB, so it is preserved as-is.)
rldot1sMstpMaxHopes = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 6, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 40)).clone(20)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1sMstpMaxHopes.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpMaxHopes.setDescription('Max number of hopes that an MST BPDU will travel inside a region.')
# Active MST configuration name as carried in MST BPDUs (read-only; the
# writable counterpart is rldot1sMstpPendingConfigurationName below).
rldot1sMstpConfigurationName = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 6, 4), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpConfigurationName.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpConfigurationName.setDescription("The active configuration name as will be caried in MST BPDU's.")
# Active MST configuration revision level (0..65535, read-only).
rldot1sMstpRevisionLevel = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 6, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpRevisionLevel.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpRevisionLevel.setDescription('The active revision level.')
# --- rldot1sMstpVlanTable (OID ...89.57.6.6) -------------------------------
# Maps each VLAN (1..4094) to its active and pending MSTP group numbers.
rldot1sMstpVlanTable = MibTable((1, 3, 6, 1, 4, 1, 89, 57, 6, 6), )
if mibBuilder.loadTexts: rldot1sMstpVlanTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpVlanTable.setDescription('A table that contains information about the alocation of vlans to groups.')
# Row template; indexed solely by rldot1sMstpVlan.
rldot1sMstpVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 57, 6, 6, 1), ).setIndexNames((0, "RADLAN-MIB", "rldot1sMstpVlan"))
if mibBuilder.loadTexts: rldot1sMstpVlanEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpVlanEntry.setDescription('A list of information maintained by every vlan about the group it belongs to.')
# Index column: the VLAN number (1..4094).
rldot1sMstpVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpVlan.setReference('IEEE 802.1s/D11-2001: Section 13.7')
if mibBuilder.loadTexts: rldot1sMstpVlan.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpVlan.setDescription('The vlan number of the vlan for which this entry contains Spanning Tree Protocol management information.')
# Active group number (0..64, read-only).
rldot1sMstpGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpGroup.setReference('')
if mibBuilder.loadTexts: rldot1sMstpGroup.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpGroup.setDescription('Specifid the active group number this vlan belonges to.')
# Pending group number (0..64, read-write) — applied via rldot1sMstpPendingAction.
rldot1sMstpPendingGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 6, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1sMstpPendingGroup.setReference('')
if mibBuilder.loadTexts: rldot1sMstpPendingGroup.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpPendingGroup.setDescription('Specifid the pending group number this vlan belonges to.')
# --- rldot1sMstpExtPortTable (OID ...89.57.6.7) ----------------------------
# Per-port MSTP extensions for the CIST: internal path costs, regional root
# bookkeeping, and the region-boundary flag.
rldot1sMstpExtPortTable = MibTable((1, 3, 6, 1, 4, 1, 89, 57, 6, 7), )
if mibBuilder.loadTexts: rldot1sMstpExtPortTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpExtPortTable.setDescription('A table that contains MSTP information about ports of the CIST.')
# Row template; indexed solely by rldot1sMstpExtPortPort.
rldot1sMstpExtPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 57, 6, 7, 1), ).setIndexNames((0, "RADLAN-MIB", "rldot1sMstpExtPortPort"))
if mibBuilder.loadTexts: rldot1sMstpExtPortEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpExtPortEntry.setDescription('A list of information maintained by every port of the CIST.')
# Index column: port number (1..4096).
rldot1sMstpExtPortPort = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 7, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpExtPortPort.setReference('IEEE 802.1t/D2-1999: Section 9.2.6')
if mibBuilder.loadTexts: rldot1sMstpExtPortPort.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpExtPortPort.setDescription('The port number of the port for which this entry contains Spanning Tree Protocol management information.')
# Operational internal (intra-region) path cost, 1..200000000.
rldot1sMstpExtPortInternalOperPathCost = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 7, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 200000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpExtPortInternalOperPathCost.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpExtPortInternalOperPathCost.setDescription('The contribution of this port to the path cost of paths towards the spanning tree regional root which include this port. 802.1S-2002 recommends that the default value of this parameter be in inverse proportion to the speed of the attached LAN.')
# Regional Root bridge identifier recorded from received BPDUs on this port.
rldot1sMstpExtPortDesignatedRegionalRoot = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 7, 1, 3), BridgeId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpExtPortDesignatedRegionalRoot.setReference('IEEE 802.1D-1990: Section 4.5.5.4')
if mibBuilder.loadTexts: rldot1sMstpExtPortDesignatedRegionalRoot.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpExtPortDesignatedRegionalRoot.setDescription('The unique Bridge Identifier of the Bridge recorded as the Root in the Configuration BPDUs transmitted by the Designated Bridge for the segment to which the port is attached.')
# Regional path cost of the segment's Designated Port.
rldot1sMstpExtPortDesignatedRegionalCost = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 7, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpExtPortDesignatedRegionalCost.setReference('IEEE 802.1D-1990: Section 4.5.5.5')
if mibBuilder.loadTexts: rldot1sMstpExtPortDesignatedRegionalCost.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpExtPortDesignatedRegionalCost.setDescription('The regional path cost of the Designated Port of the segment connected to this port. This value is compared to the Root Path Cost field in received bridge PDUs.')
# TruthValue: whether the attached segment lies outside the MST region.
rldot1sMstpExtPortBoundary = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 7, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpExtPortBoundary.setReference('IEEE 802.1D-1990: Section 4.5.5.5')
if mibBuilder.loadTexts: rldot1sMstpExtPortBoundary.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpExtPortBoundary.setDescription('Indication if the port is conented to to a lan segment outside or inside the region.')
# Administratively assigned internal path cost; 0 means "use auto default".
rldot1sMstpExtPortInternalAdminPathCost = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 57, 6, 7, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 200000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1sMstpExtPortInternalAdminPathCost.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpExtPortInternalAdminPathCost.setDescription("The administratively assigned value for the contribution of this port to the path cost of paths towards the spanning tree root. Writing a value of '0' assigns the automatically calculated default Path Cost value to the port. If the default Path Cost is being used, this object returns '0' when read. This complements the object dot1dStpPortPathCost, which returns the operational value of the path cost.")
# --- MSTP status / pending-configuration scalars (OID ...89.57.6.8 .. 6.14) -
# Max-hops value currently designated for the region (read-only view of the
# writable rldot1sMstpMaxHopes above).
rldot1sMstpDesignatedMaxHopes = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 6, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpDesignatedMaxHopes.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpDesignatedMaxHopes.setDescription('Max number of hopes that an MST BPDU will travel inside a region.')
# Bridge Identifier of the current CIST Regional Root (802.1S 13.16.4).
rldot1sMstpRegionalRoot = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 6, 9), BridgeId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpRegionalRoot.setReference('IEEE 802.1S-2002: Section 13.16.4')
if mibBuilder.loadTexts: rldot1sMstpRegionalRoot.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpRegionalRoot.setDescription('CIST Regional Root Identifier (13.16.4). The Bridge Identifier of the current CIST Regional Root.')
# CIST path cost from this bridge to the CIST Regional Root.
rldot1sMstpRegionalRootCost = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 6, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpRegionalRootCost.setReference('IEEE 802.1S-2002: Section 12.8.1.1.3')
if mibBuilder.loadTexts: rldot1sMstpRegionalRootCost.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpRegionalRootCost.setDescription('The CIST path cost from the transmitting Bridge to the CIST Regional Root.')
# Pending (staged) MST configuration name, 1..32 chars, read-write.
rldot1sMstpPendingConfigurationName = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 6, 11), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1sMstpPendingConfigurationName.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpPendingConfigurationName.setDescription('The pending configuration name.')
# Pending (staged) MST revision level, read-write.
rldot1sMstpPendingRevisionLevel = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 6, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1sMstpPendingRevisionLevel.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpPendingRevisionLevel.setDescription('The pending revision level.')
# Action trigger: copy pending config to active (1) or active to pending (2).
rldot1sMstpPendingAction = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 6, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("copyPendingActive", 1), ("copyActivePending", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1sMstpPendingAction.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpPendingAction.setDescription('The action to be done with the pending configuration. copyPendingActive - to copy the pending mst configuration to the active one. copyActivePending - to copy the active mst configuration to the pending one. ')
# Remaining hop count for information transmitted by this bridge.
rldot1sMstpRemainingHops = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 6, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1sMstpRemainingHops.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1sMstpRemainingHops.setDescription('This count value determines the amount of hops the information transmitted by this bridge can travel.')
# --- rldot1dTpAgingTime subtree (OID ...89.57.7) ---------------------------
# Read-only min/max bounds (seconds) for the bridge FDB aging timeout.
rldot1dTpAgingTime = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 57, 7))
rldot1dTpAgingTimeMin = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 7, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dTpAgingTimeMin.setReference('P802.1d/D9, July 14, 1989: Section 6.7.1.1.3')
if mibBuilder.loadTexts: rldot1dTpAgingTimeMin.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dTpAgingTimeMin.setDescription('The min timeout period in seconds for aging out dynamically learned forwarding information.')
rldot1dTpAgingTimeMax = MibScalar((1, 3, 6, 1, 4, 1, 89, 57, 7, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1dTpAgingTimeMax.setReference('P802.1d/D9, July 14, 1989: Section 6.7.1.1.3')
if mibBuilder.loadTexts: rldot1dTpAgingTimeMax.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1dTpAgingTimeMax.setDescription('The max timeout period in seconds for aging out dynamically learned forwarding information.')
# --- rlTelnet subtree (OID ...89.58) ---------------------------------------
# Telnet server configuration scalars plus a per-session table.
rlTelnet = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 58))
rlTelnetMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 58, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlTelnetMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rlTelnetMibVersion.setDescription("MIB's version, the current version is 1.")
# NOTE(review): write-accessible plaintext password object (0..20 chars) —
# defined by the source MIB; flagged here, not changed.
rlTelnetPassword = MibScalar((1, 3, 6, 1, 4, 1, 89, 58, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlTelnetPassword.setStatus('mandatory')
if mibBuilder.loadTexts: rlTelnetPassword.setDescription('Telnet Password')
# Idle timeout in minutes before the TCP connection is closed.
rlTelnetTimeout = MibScalar((1, 3, 6, 1, 4, 1, 89, 58, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlTelnetTimeout.setStatus('mandatory')
if mibBuilder.loadTexts: rlTelnetTimeout.setDescription('The number of minutes after which the TCP connection is closed if no activity is detected from the Client')
# Table of active telnet sessions, indexed by rlTelnetSessionId.
rlTelnetUsersTable = MibTable((1, 3, 6, 1, 4, 1, 89, 58, 4), )
if mibBuilder.loadTexts: rlTelnetUsersTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlTelnetUsersTable.setDescription('This table hold information about current telnet sessions')
rlTelnetUsersEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 58, 4, 1), ).setIndexNames((0, "RADLAN-MIB", "rlTelnetSessionId"))
if mibBuilder.loadTexts: rlTelnetUsersEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlTelnetUsersEntry.setDescription('The row definition for this table.')
rlTelnetSessionId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 58, 4, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlTelnetSessionId.setStatus('mandatory')
if mibBuilder.loadTexts: rlTelnetSessionId.setDescription('Telnet Session ID')
rlTelnetSessionClientAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 58, 4, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlTelnetSessionClientAddress.setStatus('mandatory')
if mibBuilder.loadTexts: rlTelnetSessionClientAddress.setDescription('Telnet Session Client IP address')
rlTelnetSessionLoginTime = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 58, 4, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlTelnetSessionLoginTime.setStatus('mandatory')
if mibBuilder.loadTexts: rlTelnetSessionLoginTime.setDescription('Telnet Session Login Time string')
# Writing disconnect(2) closes the session and removes its row.
rlTelnetSessionStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 58, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("connected", 1), ("disconnect", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlTelnetSessionStatus.setStatus('mandatory')
if mibBuilder.loadTexts: rlTelnetSessionStatus.setDescription('Telnet Session status. After status has been set to diconnect the sessions is closed and the matching entry is deleted from the table.')
# Login banner shown at connect time is rlTelnetLoginBanner followed by
# rlTelnetSecondLoginBanner.
rlTelnetLoginBanner = MibScalar((1, 3, 6, 1, 4, 1, 89, 58, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlTelnetLoginBanner.setStatus('mandatory')
if mibBuilder.loadTexts: rlTelnetLoginBanner.setDescription('Telnet Login Banner. When telnet connection is established, the banner is the concatanation of this MIB and rlTelnetSecondLoginBanner.')
rlTelnetSecondLoginBanner = MibScalar((1, 3, 6, 1, 4, 1, 89, 58, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlTelnetSecondLoginBanner.setStatus('mandatory')
if mibBuilder.loadTexts: rlTelnetSecondLoginBanner.setDescription('Telnet Login Banner Extension. When telnet connection is established, the banner is the concatanation of rlTelnetLoginBanner and this MIB')
# --- rlPolicy node (OID ...89.59) and rlArpSpoofing subtree (OID ...89.60) -
# rlPolicy is registered as a bare identifier here; its children (if any)
# are defined elsewhere in the module.
rlPolicy = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 59))
rlArpSpoofing = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 60))
rlArpSpoofingMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 60, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlArpSpoofingMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rlArpSpoofingMibVersion.setDescription("MIB's version, the current version is 1.")
# Proxy-ARP ("ARP spoofing") configuration table, indexed by
# (rlArpSpoofingIfIndex, rlArpSpoofingLocalIpAddr).
rlArpSpoofingTable = MibTable((1, 3, 6, 1, 4, 1, 89, 60, 2), )
if mibBuilder.loadTexts: rlArpSpoofingTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlArpSpoofingTable.setDescription('A list of the ifIndexes, IP addresses and corresponding MAC addresses.')
rlArpSpoofingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 60, 2, 1), ).setIndexNames((0, "RADLAN-MIB", "rlArpSpoofingIfIndex"), (0, "RADLAN-MIB", "rlArpSpoofingLocalIpAddr"))
if mibBuilder.loadTexts: rlArpSpoofingEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlArpSpoofingEntry.setDescription('An entry of this table specifis ifIndex,IP Address and MAC address.')
rlArpSpoofingIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 60, 2, 1, 1), InterfaceIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlArpSpoofingIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: rlArpSpoofingIfIndex.setDescription('The physical interface for which this entry contains information.')
# IP address the device answers ARP requests for.
rlArpSpoofingLocalIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 60, 2, 1, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlArpSpoofingLocalIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: rlArpSpoofingLocalIpAddr.setDescription('Ip addres for which the device will send ARP reply (ARP spoofing).')
# MAC used in the ARP reply; all-zero means "use the interface MAC".
rlArpSpoofingMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 60, 2, 1, 3), PhysAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlArpSpoofingMacAddr.setStatus('mandatory')
if mibBuilder.loadTexts: rlArpSpoofingMacAddr.setDescription("MAC addres with which the device will send ARP reply. If the field is ommited or its value 0.0.0.0 the device will send with the interface's MAC address.")
# Optional target for periodic ARP requests; 0.0.0.0 disables.
rlArpSpoofingRemoteIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 60, 2, 1, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlArpSpoofingRemoteIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: rlArpSpoofingRemoteIpAddr.setDescription('Ip addres for which the device will send periodically ARP requests if its value differs from 0.0.0.0.')
# Optional output port for the periodic requests; 0 = flood to all VLAN ports.
rlArpSpoofingOutPhysIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 60, 2, 1, 5), InterfaceIndexOrZero()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlArpSpoofingOutPhysIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: rlArpSpoofingOutPhysIfIndex.setDescription("The physical interface to which the device will send periodically ARP requests if its value differs from 0. If its value is 0 then ARP requests will send to all the VLAN's ports.")
# Standard RowStatus column controlling row creation/update/deletion.
rlArpSpoofingStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 60, 2, 1, 6), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlArpSpoofingStatus.setStatus('mandatory')
if mibBuilder.loadTexts: rlArpSpoofingStatus.setDescription('It is used to insert, update or delete an entry')
# --- rlMir subtree (OID ...89.61): Multi-Instance Router (MRI) -------------
# Scalars sizing the MRI pool, plus interface/port assignment tables.
rlMir = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 61))
rlMirMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 61, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMirMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirMibVersion.setDescription("MIB's version, the current version is 1.")
# Max MRI instances to take effect after the next reset (default 1).
rlMirMaxNumOfMRIsAfterReset = MibScalar((1, 3, 6, 1, 4, 1, 89, 61, 2), Integer32().clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMirMaxNumOfMRIsAfterReset.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirMaxNumOfMRIsAfterReset.setDescription('The maximun instanses of IP Multi Instance Routers after the following reset.')
# Max MRI instances currently in effect (read-only).
rlMirMaxNumOfMRIs = MibScalar((1, 3, 6, 1, 4, 1, 89, 61, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMirMaxNumOfMRIs.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirMaxNumOfMRIs.setDescription('The maximun instanses of IP Multi Instance Routers.')
# Selector: which MRI's MIB variables the SNMP agent currently exposes.
rlMirCurMriNum = MibScalar((1, 3, 6, 1, 4, 1, 89, 61, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMirCurMriNum.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirCurMriNum.setDescription('The number of the MRI of which MIB variables are treated at the same time by the SNMP agent.')
# L2 interface -> MRID assignment table, indexed by rlMirInterfaceIfIndex.
rlMirInterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 89, 61, 5), )
if mibBuilder.loadTexts: rlMirInterfaceTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirInterfaceTable.setDescription('A list of the assignment ifinterfaces to IP Router instances.')
rlMirInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 61, 5, 1), ).setIndexNames((0, "RADLAN-MIB", "rlMirInterfaceIfIndex"))
if mibBuilder.loadTexts: rlMirInterfaceEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirInterfaceEntry.setDescription('An entry of this table specifies the MRID assignement to a L2 interfrace.')
rlMirInterfaceIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 61, 5, 1, 1), InterfaceIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMirInterfaceIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirInterfaceIfIndex.setDescription('The L2 interface for which this entry contains information.')
rlMirInterfaceMrid = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 61, 5, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMirInterfaceMrid.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirInterfaceMrid.setDescription('Multi IP Router Instance Identifier to which the interface is assgned.')
# VLAN-based reserved physical ports table (delete-only via RowStatus).
rlMirVlanBaseReservedPortsTable = MibTable((1, 3, 6, 1, 4, 1, 89, 61, 6), )
if mibBuilder.loadTexts: rlMirVlanBaseReservedPortsTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirVlanBaseReservedPortsTable.setDescription('A list VLAN based reserved physical ports.')
rlMirVlanBaseReservedPortsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 61, 6, 1), ).setIndexNames((0, "RADLAN-MIB", "rlMirVlanBaseReservedPortsIfIndex"))
if mibBuilder.loadTexts: rlMirVlanBaseReservedPortsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirVlanBaseReservedPortsEntry.setDescription('A VLAN based reserved physical port.')
rlMirVlanBaseReservedPortsIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 61, 6, 1, 1), InterfaceIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMirVlanBaseReservedPortsIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirVlanBaseReservedPortsIfIndex.setDescription('IfIndex of VLAN based reserved physical port.')
rlMirVlanBaseReservedPortsStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 61, 6, 1, 2), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMirVlanBaseReservedPortsStatus.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirVlanBaseReservedPortsStatus.setDescription('It is used to delete an entry')
# VLAN-based logical point-to-point ports table: each row binds a logical
# port ifIndex to a reserved physical port plus a VLAN tag (1..4095).
rlMirVlanBaseLogicalPortsTable = MibTable((1, 3, 6, 1, 4, 1, 89, 61, 7), )
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsTable.setDescription('A list VLAN based logical ports.')
rlMirVlanBaseLogicalPortsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 61, 7, 1), ).setIndexNames((0, "RADLAN-MIB", "rlMirVlanBaseLogicalPortsIfIndex"))
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsEntry.setDescription('A VLAN based logical point-to-point port.')
rlMirVlanBaseLogicalPortsIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 61, 7, 1, 1), InterfaceIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsIfIndex.setDescription('IfIndex of VLAN based Logical point-to-point port.')
rlMirVlanBaseLogicalPortsReservedIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 61, 7, 1, 2), InterfaceIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsReservedIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsReservedIfIndex.setDescription('IfIndex of VLAN based reserved physical port on which the logical port is defined.')
rlMirVlanBaseLogicalPortsVlanTag = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 61, 7, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsVlanTag.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsVlanTag.setDescription('VLAN tag.')
rlMirVlanBaseLogicalPortsStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 61, 7, 1, 4), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsStatus.setStatus('mandatory')
if mibBuilder.loadTexts: rlMirVlanBaseLogicalPortsStatus.setDescription('It is used to add, update and delete an entry')
# --- rlIpMRouteStdMIB node (OID ...89.62) and rl3sw2swTables (OID ...89.63) -
# rlIpMRouteStdMIB is a bare subtree anchor; its children live elsewhere.
rlIpMRouteStdMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 62))
rl3sw2swTables = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 63))
# Polling interval (seconds, default 3) for dynamic 3SW/2SW tables.
rl3sw2swTablesPollingInterval = MibScalar((1, 3, 6, 1, 4, 1, 89, 63, 1), Integer32().clone(3)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rl3sw2swTablesPollingInterval.setStatus('mandatory')
if mibBuilder.loadTexts: rl3sw2swTablesPollingInterval.setDescription('The polling interval for dynamic 3SW/2SW tables in seconds.')
# --- rlGvrp subtree (OID ...89.64): GVRP timers and counters ---------------
# Per-port GVRP timer overrides, protocol PDU counters, and error counters.
# All per-port tables are indexed by the standard BRIDGE-MIB dot1dBasePort.
rlGvrp = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 64))
# Timer table: Join/Leave/LeaveAll in centiseconds, plus an override flag.
rlPortGvrpTimersTable = MibTable((1, 3, 6, 1, 4, 1, 89, 64, 1), )
if mibBuilder.loadTexts: rlPortGvrpTimersTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpTimersTable.setDescription('A table of GVRP timer values for every bridge port. This is indexed by dot1dBasePort.')
rlPortGvrpTimersEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 64, 1, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: rlPortGvrpTimersEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpTimersEntry.setDescription('GVRP timer values for a bridge port.')
# Defaults: Join 20, Leave 60, LeaveAll 1000 centiseconds.
rlPortGvrpJoinTime = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 1, 1, 1), TimeInterval().clone(20)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlPortGvrpJoinTime.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpJoinTime.setDescription('The GVRP Join time, in centiseconds.')
rlPortGvrpLeaveTime = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 1, 1, 2), TimeInterval().clone(60)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlPortGvrpLeaveTime.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpLeaveTime.setDescription('The GVRP Leave time, in centiseconds.')
rlPortGvrpLeaveAllTime = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 1, 1, 3), TimeInterval().clone(1000)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlPortGvrpLeaveAllTime.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpLeaveAllTime.setDescription('The GVRP LeaveAll time, in centiseconds.')
# enabled(1): use this row's timers; otherwise dot1dPortGarpTable applies.
rlPortGvrpOverrideGarp = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 1, 1, 4), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlPortGvrpOverrideGarp.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpOverrideGarp.setDescription('If enabled{1}, GVRP timer values for this port are determined by the values in this entry. Otherwise, they are determined by the values in dot1dPortGarpTable.')
rlGvrpSupported = MibScalar((1, 3, 6, 1, 4, 1, 89, 64, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlGvrpSupported.setStatus('mandatory')
if mibBuilder.loadTexts: rlGvrpSupported.setDescription('Is GVRP supported in this device or not')
rlGvrpMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 64, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlGvrpMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rlGvrpMibVersion.setDescription("MIB's version, the current version is 3.")
# Statistics table: per-port counts of GVRP messages received (R*) and
# sent (S*) since last clearance — JoinEmpty/JoinIn/Empty/LeaveIn/
# LeaveEmpty/LeaveAll — plus a self-resetting clear trigger.
rlPortGvrpStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 89, 64, 4), )
if mibBuilder.loadTexts: rlPortGvrpStatisticsTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpStatisticsTable.setDescription('A table of GVRP statistics values for every bridge port. This is indexed by dot1dBasePort.')
rlPortGvrpStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 64, 4, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: rlPortGvrpStatisticsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpStatisticsEntry.setDescription('GVRP statistics values for a bridge port.')
rlPortGvrpStatisticsRJE = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 4, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpStatisticsRJE.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpStatisticsRJE.setDescription('The number of Join Empty Received on the port, since the last clearance.')
rlPortGvrpStatisticsRJIn = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 4, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpStatisticsRJIn.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpStatisticsRJIn.setDescription('The number of Join In Received on the port, since the last clearance.')
rlPortGvrpStatisticsREmp = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 4, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpStatisticsREmp.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpStatisticsREmp.setDescription('The number of Empty Received on the port, since the last clearance.')
rlPortGvrpStatisticsRLIn = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 4, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpStatisticsRLIn.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpStatisticsRLIn.setDescription('The number of Leave In Received on the port, since the last clearance.')
rlPortGvrpStatisticsRLE = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 4, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpStatisticsRLE.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpStatisticsRLE.setDescription('The number of Leave Empty Received on the port, since the last clearance.')
rlPortGvrpStatisticsRLA = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 4, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpStatisticsRLA.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpStatisticsRLA.setDescription('The number of Leave All Received on the port, since the last clearance.')
rlPortGvrpStatisticsSJE = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 4, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpStatisticsSJE.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpStatisticsSJE.setDescription('The number of Join Empty Sent on the port, since the last clearance.')
rlPortGvrpStatisticsSJIn = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 4, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpStatisticsSJIn.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpStatisticsSJIn.setDescription('The number of Join In Sent on the port, since the last clearance.')
rlPortGvrpStatisticsSEmp = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 4, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpStatisticsSEmp.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpStatisticsSEmp.setDescription('The number of Empty Sent on the port, since the last clearance.')
rlPortGvrpStatisticsSLIn = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 4, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpStatisticsSLIn.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpStatisticsSLIn.setDescription('The number of Leave In Sent on the port, since the last clearance.')
rlPortGvrpStatisticsSLE = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 4, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpStatisticsSLE.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpStatisticsSLE.setDescription('The number of Leave Empty Sent on the port, since the last clearance.')
rlPortGvrpStatisticsSLA = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 4, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpStatisticsSLA.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpStatisticsSLA.setDescription('The number of Leave All Sent on the port, since the last clearance.')
# Write activate(1) to clear all counters in the row; reads back passive(2)
# once the clearance completes.
rlPortGvrpStatisticsClear = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 4, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("activate", 1), ("passive", 2))).clone('passive')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlPortGvrpStatisticsClear.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpStatisticsClear.setDescription("activate - the signal/trigger to clear ALL the rlPortGvrpStatisticsEntry's fields. passive - specify that the rlPortGvrpStatisticsClear field is not activate, or finshed the clearnce process. the rlPortGvrpStatisticsClear return automatically to passive after it was activate.")
# Error-statistics table: per-port counts of malformed GVRP PDUs
# (invalid protocol id / attribute type / value / PDU length / attr length).
rlPortGvrpErrorStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 89, 64, 5), )
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsTable.setDescription('A table of GVRP ErrorStatistics values for every bridge port. This is indexed by dot1dBasePort.')
rlPortGvrpErrorStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 64, 5, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsEntry.setDescription('GVRP ErrorStatistics values for a bridge port.')
rlPortGvrpErrorStatisticsInvProt = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 5, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsInvProt.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsInvProt.setDescription('The number of Invalid Protocol Id encountered by the GVRP port. (since the last clearance)')
rlPortGvrpErrorStatisticsInvAtyp = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 5, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsInvAtyp.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsInvAtyp.setDescription('The number of Invalid Attribute Type encountered by the GVRP port. (since the last clearance)')
rlPortGvrpErrorStatisticsInvAval = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 5, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsInvAval.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsInvAval.setDescription('The number of Invalid Attribute Value encountered by the GVRP port. (since the last clearance)')
rlPortGvrpErrorStatisticsInvPlen = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 5, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsInvPlen.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsInvPlen.setDescription('The number of Invalid PDU Length encountered by the GVRP port. (since the last clearance)')
rlPortGvrpErrorStatisticsInvAlen = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 5, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsInvAlen.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsInvAlen.setDescription('The number of Invalid Attribute Length encountered by the GVRP port. (since the last clearance)')
rlPortGvrpErrorStatisticsInvEvent = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 5, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsInvEvent.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsInvEvent.setDescription('The number of Invalid Event encountered by the GVRP port. (since the last clearance)')
rlPortGvrpErrorStatisticsClear = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 5, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("activate", 1), ("passive", 2))).clone('passive')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsClear.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpErrorStatisticsClear.setDescription("activate - the signal/trigger to clear ALL the rlPortGvrpErrorStatisticsEntry's fields. passive - specify that the rlPortGvrpErrorStatisticsClear field is not activate, or finshed the clearnce process. the rlPortGvrpErrorStatisticsClear return automatically to passive after it was activate.")
# --- Per-port GVRP applicant status table (...89.64.6), indexed by
# dot1dBasePort. Single read-write column selecting whether the port's GVRP
# applicant sends GARP PDUs.
rlPortGvrpApplicantStatusTable = MibTable((1, 3, 6, 1, 4, 1, 89, 64, 6), )
if mibBuilder.loadTexts: rlPortGvrpApplicantStatusTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpApplicantStatusTable.setDescription('A table of GVRP Applicant Status values for every bridge port. This is indexed by dot1dBasePort.')
rlPortGvrpApplicantStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 64, 6, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: rlPortGvrpApplicantStatusEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpApplicantStatusEntry.setDescription('GVRP Applicant Status value for a bridge port.')
# participant(1) is the default; nonParticipant(2) suppresses GARP PDU transmission.
rlPortGvrpApplicantStatusValue = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 6, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("participant", 1), ("nonParticipant", 2))).clone('participant')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlPortGvrpApplicantStatusValue.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpApplicantStatusValue.setDescription('participant - the Gvrp enabled port sending GARP PDUs. nonParticipant - preventing the Gvrp enabled port from sending GARP PDUs.')
# --- Per-port GVRP registration mode table (...89.64.8), indexed by
# dot1dBasePort. NOTE(review): sub-arc 7 is skipped between the applicant
# table (arc 6) and this one (arc 8) — presumably reserved in the source MIB.
rlPortGvrpRegistrationModeTable = MibTable((1, 3, 6, 1, 4, 1, 89, 64, 8), )
if mibBuilder.loadTexts: rlPortGvrpRegistrationModeTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpRegistrationModeTable.setDescription('A table of GVRP Registration Mode values for every bridge port. This is indexed by dot1dBasePort.')
rlPortGvrpRegistrationModeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 64, 8, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: rlPortGvrpRegistrationModeEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpRegistrationModeEntry.setDescription('GVRP Registration Mode value for a bridge port.')
# TruthValue flag, default false: true forbids VLAN registration on the port.
rlPortGvrpRegistrationModeForbidden = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 64, 8, 1, 1), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlPortGvrpRegistrationModeForbidden.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGvrpRegistrationModeForbidden.setDescription('true - deregisters all VLANS on the specified port and prevents any VLAN creation or registration on that port. false - the registration operation on this port behaves normally.')
# --- Identifier anchors for sibling subtrees (65: 802.3ad aggregation,
# 66: embedded web) and the software package version subtree (67).
rlDot3adAgg = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 65))
rlEmbWeb = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 66))
rlSwPackageVersion = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 67))
# Table of (package name, package version) pairs for the running software,
# indexed by rlSwPackageVersionName (implied-length index, hence the leading 1).
rlSwPackageVersionTable = MibTable((1, 3, 6, 1, 4, 1, 89, 67, 1), )
if mibBuilder.loadTexts: rlSwPackageVersionTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlSwPackageVersionTable.setDescription('The table listing the current versions of packages that are included in the running software.')
rlSwPackageVersionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 67, 1, 1), ).setIndexNames((1, "RADLAN-MIB", "rlSwPackageVersionName"))
if mibBuilder.loadTexts: rlSwPackageVersionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlSwPackageVersionEntry.setDescription('The row definition for this table.')
# Index column: package name, 1-20 chars.
rlSwPackageVersionName = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 67, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlSwPackageVersionName.setStatus('mandatory')
if mibBuilder.loadTexts: rlSwPackageVersionName.setDescription('The Package name.')
# NOTE(review): "Vesrion" is a typo carried over from the source MIB's object
# name; it cannot be corrected here without breaking the published symbol.
rlSwPackageVersionVesrion = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 67, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlSwPackageVersionVesrion.setStatus('mandatory')
if mibBuilder.loadTexts: rlSwPackageVersionVesrion.setDescription('The Package version.')
# --- Subtree anchors: 68 Broadcom-specific, 69 multi-session terminal,
# 70 remote CLI (RCli) scalars.
rlBroadcom = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 68))
rlMultiSessionTerminal = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 69))
# Password gate for switching the terminal out of debug mode (0-20 chars).
rlTerminalDebugModePassword = MibScalar((1, 3, 6, 1, 4, 1, 89, 69, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlTerminalDebugModePassword.setStatus('mandatory')
if mibBuilder.loadTexts: rlTerminalDebugModePassword.setDescription('When a user wants to change the terminal mode from debug mode to ASCII he must enter this password first')
rlRCli = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 70))
rlRCliMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 70, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlRCliMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rlRCliMibVersion.setDescription("MIB's version, the current version is 1.")
# RCLI mode passwords (user / enable / config), each an OctetString of 0-32 bytes.
rlRCliUserPassword = MibScalar((1, 3, 6, 1, 4, 1, 89, 70, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlRCliUserPassword.setStatus('mandatory')
if mibBuilder.loadTexts: rlRCliUserPassword.setDescription('RCLI User mode Password')
rlRCliEnablePassword = MibScalar((1, 3, 6, 1, 4, 1, 89, 70, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlRCliEnablePassword.setStatus('mandatory')
if mibBuilder.loadTexts: rlRCliEnablePassword.setDescription('RCLI Enable mode Password')
rlRCliConfigPassword = MibScalar((1, 3, 6, 1, 4, 1, 89, 70, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlRCliConfigPassword.setStatus('mandatory')
if mibBuilder.loadTexts: rlRCliConfigPassword.setDescription('RCLI Config mode Password')
# Session timeout, constrained to 5-3600 (units not stated in the MIB text).
rlRCliTimer = MibScalar((1, 3, 6, 1, 4, 1, 89, 70, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 3600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlRCliTimer.setStatus('mandatory')
if mibBuilder.loadTexts: rlRCliTimer.setDescription('RCLI Timeout')
# Whether the RCLI configuration file is applied after reset.
rlRcliFileAction = MibScalar((1, 3, 6, 1, 4, 1, 89, 70, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("notUsedAfterReset", 1), ("usedAfterReset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlRcliFileAction.setStatus('mandatory')
if mibBuilder.loadTexts: rlRcliFileAction.setDescription('RCLI Configuration File action determines whether The device configuration after reset will be set by the configuration file content or not.')
# --- Subtree anchors: 71 BGP, 72 agent capability groups, 73 aggregate
# (super) VLANs. The aggregate-VLAN subtree holds two row-status tables plus
# an ARP-proxy toggle.
rlBgp = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 71))
rlAgentsCapabilitiesGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 72))
rlAggregateVlan = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 73))
rlAggregateVlanMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 73, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlAggregateVlanMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rlAggregateVlanMibVersion.setDescription("MIB's version, the current version is 1.")
# Table of aggregate VLANs, indexed by rlAggregateVlanIndex (ifIndex >= 10000
# per the descriptions below).
rlAggregateVlanTable = MibTable((1, 3, 6, 1, 4, 1, 89, 73, 2), )
if mibBuilder.loadTexts: rlAggregateVlanTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlAggregateVlanTable.setDescription('The table creates an aggregateVlans, the IfIndex is from 10000')
rlAggregateVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 73, 2, 1), ).setIndexNames((0, "RADLAN-MIB", "rlAggregateVlanIndex"))
if mibBuilder.loadTexts: rlAggregateVlanEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlAggregateVlanEntry.setDescription('The row definition for this table.')
# Index column — note: no setMaxAccess call here, unlike its sibling columns.
rlAggregateVlanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 73, 2, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: rlAggregateVlanIndex.setStatus('mandatory')
if mibBuilder.loadTexts: rlAggregateVlanIndex.setDescription('This index indicate the aggrigateVlan id, the aggregate vlan index is starting from 10000 ')
rlAggregateVlanName = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 73, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlAggregateVlanName.setStatus('mandatory')
if mibBuilder.loadTexts: rlAggregateVlanName.setDescription('The name of the aggregateVlan ')
# MAC address source for the VLAN: the default device address or one reserved.
rlAggregateVlanPhysAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 73, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("default", 1), ("reserve", 2))).clone('default')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlAggregateVlanPhysAddressType.setStatus('mandatory')
if mibBuilder.loadTexts: rlAggregateVlanPhysAddressType.setDescription(' This variable indicates whether the physical address assigned to this VLAN should be the default one or be chosen from the set of reserved physical addresses of the device.')
# RowStatus column used for row creation/deletion.
rlAggregateVlanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 73, 2, 1, 4), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlAggregateVlanStatus.setStatus('mandatory')
if mibBuilder.loadTexts: rlAggregateVlanStatus.setDescription("The status of the aggregateVlan table entry. It's used to delete an entry")
# Sub-VLAN membership table: maps each aggregate VLAN to its sub-VLAN ifIndexes;
# a parent row in rlAggregateVlanTable must exist first.
rlAggregateSubVlanTable = MibTable((1, 3, 6, 1, 4, 1, 89, 73, 3), )
if mibBuilder.loadTexts: rlAggregateSubVlanTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlAggregateSubVlanTable.setDescription('The table indicates all the allocated sub-vlans to the aggregateVlans, an entry in the rlAggregateVlanTable must be exist before allocating the subVlans')
rlAggregateSubVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 73, 3, 1), ).setIndexNames((0, "RADLAN-MIB", "rlAggregateVlanIndex"), (0, "RADLAN-MIB", "rlAggregateSubVlanIfIndex"))
if mibBuilder.loadTexts: rlAggregateSubVlanEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlAggregateSubVlanEntry.setDescription('The row definition for this table.')
rlAggregateSubVlanIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 73, 3, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlAggregateSubVlanIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: rlAggregateSubVlanIfIndex.setDescription('Indicate the subVlan that allocated to the aggregate vlan')
rlAggregateSubVlanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 73, 3, 1, 2), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlAggregateSubVlanStatus.setStatus('mandatory')
if mibBuilder.loadTexts: rlAggregateSubVlanStatus.setDescription("The status of the aggregateSubVlan table entry. It's used to delete an entry")
# Global ARP-proxy toggle for super VLANs; default disable(2).
rlAggregateVlanArpProxy = MibScalar((1, 3, 6, 1, 4, 1, 89, 73, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlAggregateVlanArpProxy.setStatus('mandatory')
if mibBuilder.loadTexts: rlAggregateVlanArpProxy.setDescription('When ARP Proxy is enabled, the router can respond to ARP requests for nodes located on different sub-vlans, which belong to the same Super VLAN.The router responds with its own MAC address. When ARP Proxy is disabled, the router responds only to ARP requests for its own IP addresses.')
# --- GMRP subtree (...89.75): feature/version scalars, per-port GARP timer
# overrides, and per-VLAN GMRP enable flags. NOTE(review): arc 74 is skipped —
# presumably reserved in the source MIB.
rlGmrp = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 75))
rlGmrpSupported = MibScalar((1, 3, 6, 1, 4, 1, 89, 75, 1), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlGmrpSupported.setStatus('mandatory')
if mibBuilder.loadTexts: rlGmrpSupported.setDescription('Is Gmrp supported in the device or not.')
rlGmrpMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 75, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlGmrpMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rlGmrpMibVersion.setDescription("MIB's version, the current version is 1.")
# Per-port GMRP timer table, indexed by dot1dBasePort. Defaults (20/60/1000
# centiseconds) match the standard GARP Join/Leave/LeaveAll defaults.
rlPortGmrpTimersTable = MibTable((1, 3, 6, 1, 4, 1, 89, 75, 3), )
if mibBuilder.loadTexts: rlPortGmrpTimersTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGmrpTimersTable.setDescription('A table of Gmrp timer values for every bridge port. This is indexed by dot1dBasePort.')
rlPortGmrpTimersEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 75, 3, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: rlPortGmrpTimersEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGmrpTimersEntry.setDescription('Gmrp timer values for a bridge port.')
rlPortGmrpJoinTime = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 75, 3, 1, 1), TimeInterval().clone(20)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlPortGmrpJoinTime.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGmrpJoinTime.setDescription('The Gmrp Join time, in centiseconds.')
rlPortGmrpLeaveTime = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 75, 3, 1, 2), TimeInterval().clone(60)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlPortGmrpLeaveTime.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGmrpLeaveTime.setDescription('The Gmrp Leave time, in centiseconds.')
rlPortGmrpLeaveAllTime = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 75, 3, 1, 3), TimeInterval().clone(1000)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlPortGmrpLeaveAllTime.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGmrpLeaveAllTime.setDescription('The Gmrp LeaveAll time, in centiseconds.')
# When enabled, the timers in this row override dot1dPortGarpTable's values.
rlPortGmrpOverrideGarp = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 75, 3, 1, 4), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlPortGmrpOverrideGarp.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortGmrpOverrideGarp.setDescription('If enabled{1}, Gmrp timer values for this port are determined by the values in this entry. Otherwise, they are determined by the values in dot1dPortGarpTable.')
# Per-VLAN GMRP enable table, indexed by VLAN tag; default disabled (false).
rlGmrpVlanTable = MibTable((1, 3, 6, 1, 4, 1, 89, 75, 4), )
if mibBuilder.loadTexts: rlGmrpVlanTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlGmrpVlanTable.setDescription('This table is to Enable/Disable Gmrp in the vlan.')
rlGmrpVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 75, 4, 1), ).setIndexNames((0, "RADLAN-MIB", "rlGmrpVlanTag"))
if mibBuilder.loadTexts: rlGmrpVlanEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlGmrpVlanEntry.setDescription('An entry in the rlGmrpVlanTable.')
rlGmrpVlanTag = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 75, 4, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlGmrpVlanTag.setStatus('mandatory')
if mibBuilder.loadTexts: rlGmrpVlanTag.setDescription('The tag of the VLAN for which this entry contains information.')
rlGmrpVlanEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 75, 4, 1, 2), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlGmrpVlanEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rlGmrpVlanEnable.setDescription('Enable/Disable Gmrp in the vlan.')
# --- DHCP client subtree (...89.76): release/renew action table, the DHCP
# approval workflow (waiting addresses + approve/decline actions), and a
# per-interface renew command table. NOTE(review): sub-arcs 1-2 under 76 are
# not defined in this chunk — presumably defined elsewhere or reserved.
rlDhcpCl = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 76))
# Release/renew via RowStatus: creating a row renews the interface's DHCP
# address, destroying it releases the address.
rlDhcpClActionTable = MibTable((1, 3, 6, 1, 4, 1, 89, 76, 3), )
if mibBuilder.loadTexts: rlDhcpClActionTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpClActionTable.setDescription(' The (conceptual) table mentione IP address which must be released/renewed on the interface.')
rlDhcpClActionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 76, 3, 1), ).setIndexNames((0, "RADLAN-MIB", "rlDhcpClActionIfIndex"))
if mibBuilder.loadTexts: rlDhcpClActionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpClActionEntry.setDescription('An entry (conceptual row) in dhcpClActionTable.')
rlDhcpClActionIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 76, 3, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlDhcpClActionIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpClActionIfIndex.setDescription(' The interface which the action is implemented for or NULL if it implemented for all device. ')
rlDhcpClActionStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 76, 3, 1, 2), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlDhcpClActionStatus.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpClActionStatus.setDescription('The status of this entry. Creating the entry renewing Dhcp address on the interface; destroying the entry release Dhcp address on the interface.')
# Client host name sent in the DHCP exchange (0-20 chars).
rlDhcpClActionHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 76, 3, 1, 3), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlDhcpClActionHostName.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpClActionHostName.setDescription(' This option specifies the name of the client.')
# DHCP approval feature flag.
rlDhcpApprovalEnabled = MibScalar((1, 3, 6, 1, 4, 1, 89, 76, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlDhcpApprovalEnabled.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpApprovalEnabled.setDescription('DHCP Approval feature status - enable (True) or desable (False). Must be True only if DHCP Approval supported, device has only one ip interface and default ip exist.')
# Read-only view of addresses pending approval, one row per IP interface.
rlDhcpApprovalWaitingTable = MibTable((1, 3, 6, 1, 4, 1, 89, 76, 5), )
if mibBuilder.loadTexts: rlDhcpApprovalWaitingTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpApprovalWaitingTable.setDescription('IP addresses waiting for approval.')
rlDhcpApprovalWaitingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 76, 5, 1), ).setIndexNames((0, "RADLAN-MIB", "rlDhcpApprovalWaitingIfIndex"))
if mibBuilder.loadTexts: rlDhcpApprovalWaitingEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpApprovalWaitingEntry.setDescription('An entry in rlDhcpApprovalWaitingTable.')
rlDhcpApprovalWaitingIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 76, 5, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlDhcpApprovalWaitingIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpApprovalWaitingIfIndex.setDescription('IP interface ifIndex.')
rlDhcpApprovalWaitingAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 76, 5, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlDhcpApprovalWaitingAddress.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpApprovalWaitingAddress.setDescription('IP Address waiting for approval.')
rlDhcpApprovalWaitingMask = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 76, 5, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlDhcpApprovalWaitingMask.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpApprovalWaitingMask.setDescription('Mask waiting for approval.')
rlDhcpApprovalWaitingGateway = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 76, 5, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlDhcpApprovalWaitingGateway.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpApprovalWaitingGateway.setDescription('Default gateway of received address.')
# Approve/decline action table keyed by (ifIndex, address, mask); the
# TruthValue column carries the verdict.
rlDhcpApprovalActionTable = MibTable((1, 3, 6, 1, 4, 1, 89, 76, 6), )
if mibBuilder.loadTexts: rlDhcpApprovalActionTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpApprovalActionTable.setDescription('Action for waiting ip address (approve/decline).')
rlDhcpApprovalActionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 76, 6, 1), ).setIndexNames((0, "RADLAN-MIB", "rlDhcpApprovalActionIfIndex"), (0, "RADLAN-MIB", "rlDhcpApprovalActionAddress"), (0, "RADLAN-MIB", "rlDhcpApprovalActionMask"))
if mibBuilder.loadTexts: rlDhcpApprovalActionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpApprovalActionEntry.setDescription('An entry in rlDhcpApprovalActionTable.')
rlDhcpApprovalActionIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 76, 6, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlDhcpApprovalActionIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpApprovalActionIfIndex.setDescription('IP interface ifIndex.')
rlDhcpApprovalActionAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 76, 6, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlDhcpApprovalActionAddress.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpApprovalActionAddress.setDescription('IP Address.')
rlDhcpApprovalActionMask = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 76, 6, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlDhcpApprovalActionMask.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpApprovalActionMask.setDescription('IP Address mask.')
rlDhcpApprovalActionApprove = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 76, 6, 1, 4), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlDhcpApprovalActionApprove.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpApprovalActionApprove.setDescription('Approve or decline ip address.')
# Per-interface renew command, indexed by IF-MIB ifIndex.
# NOTE(review): setMaxAccess on a MibTable (not a column) is unusual for
# pysnmp-generated code — verify against the source MIB / compiler output.
rlDhcpClCommandTable = MibTable((1, 3, 6, 1, 4, 1, 89, 76, 7), ).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlDhcpClCommandTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpClCommandTable.setDescription('Action MIB for DHCP Renew command.')
rlDhcpClCommandEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 76, 7, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: rlDhcpClCommandEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpClCommandEntry.setDescription('The row definition for this table.')
# NOTE(review): this entry defines only column 2; column 1 is absent here —
# presumably the ifIndex index column is implicit via IF-MIB. Verify.
rlDhcpClCommandAction = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 76, 7, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("none", 0), ("renew", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlDhcpClCommandAction.setStatus('mandatory')
if mibBuilder.loadTexts: rlDhcpClCommandAction.setDescription('Action to apply.')
rlStormCtrl = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 77))
rlStormCtrlSupport = MibScalar((1, 3, 6, 1, 4, 1, 89, 77, 1), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlStormCtrlSupport.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlSupport.setDescription('Identify if the strom control protection is supported')
rlStormCtrlMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 77, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlStormCtrlMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlMibVersion.setDescription("MIB's version, the current version is 3.")
rlStormCtrlRateUnitTypeSupport = MibScalar((1, 3, 6, 1, 4, 1, 89, 77, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlStormCtrlRateUnitTypeSupport.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlRateUnitTypeSupport.setDescription('the supported rate unit type for the storm rate control')
rlStormCtrlTypeSupport = MibScalar((1, 3, 6, 1, 4, 1, 89, 77, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlStormCtrlTypeSupport.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlTypeSupport.setDescription('the supported frame type for the storm control protection')
rlStormCtrlRateSupportPerType = MibScalar((1, 3, 6, 1, 4, 1, 89, 77, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlStormCtrlRateSupportPerType.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlRateSupportPerType.setDescription('identify if rate control is supported for each frame type')
rlStormCtrlEnbaleDependencyBetweenTypes = MibScalar((1, 3, 6, 1, 4, 1, 89, 77, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlStormCtrlEnbaleDependencyBetweenTypes.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlEnbaleDependencyBetweenTypes.setDescription('indicate enable limitation of dependency between frame types, such as enabling of multicast should be with the enabling of broadcast type (bcm 5632)')
rlStormCtrlRateDependencyBetweenTypes = MibScalar((1, 3, 6, 1, 4, 1, 89, 77, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlStormCtrlRateDependencyBetweenTypes.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlRateDependencyBetweenTypes.setDescription('indicate limitation of dependency between frame types for rate assignment, for example: assigning of rate limit for unicast frame must assigning the same rate for multicast and bradcast frame (bcm 5615), in case the device support enbale per each frame type but with the same rate limitation.')
rlStormCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 89, 77, 8), )
if mibBuilder.loadTexts: rlStormCtrlTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlTable.setDescription('The table contains the storm control protection per port')
rlStormCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 77, 8, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: rlStormCtrlEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlEntry.setDescription('storm control protection, defined per port,frame type and rate')
rlStormCtrlRateType = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 1), RlStormCtrlRateUnit()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlRateType.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlRateType.setDescription('indicate the rate unit type')
rlStormCtrlUnknownUnicastEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 2), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlUnknownUnicastEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlUnknownUnicastEnable.setDescription('enable or disable the storm control for unknown unicast frames')
rlStormCtrlUnknownUnicastRate = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 3), Unsigned_32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlUnknownUnicastRate.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlUnknownUnicastRate.setDescription('set the storm control rate limit for the unknown unicast frames, 0 indicate blocking of frames from this type.')
rlStormCtrlUnknownMulticastEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 4), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlUnknownMulticastEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlUnknownMulticastEnable.setDescription('enable or disable the storm control for unknown multicast frames')
rlStormCtrlUnknownMulticastRate = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 5), Unsigned_32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlUnknownMulticastRate.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlUnknownMulticastRate.setDescription('set the storm control rate limit for the unknown multicast frames, 0 indicate blocking of frames from this type.')
rlStormCtrlBroadcastEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 6), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlBroadcastEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlBroadcastEnable.setDescription('enable or disable the storm control for Broadcast frames')
rlStormCtrlBroadcastRate = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 7), Unsigned_32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlBroadcastRate.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlBroadcastRate.setDescription('set the storm control rate limit for the Broadcast frames, 0 indicate blocking of frames from this type.')
# --- Storm-control per-port table columns (OID 1.3.6.1.4.1.89.77.8.1.x) ---
# Generated pysnmp/pysmi MIB objects: each column is declared, then its
# status/description are attached only when mibBuilder.loadTexts is set
# (i.e. when the MIB was compiled with texts retained).
# Enable/rate pair for multicast frame storm control.
rlStormCtrlMulticastEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 8), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlMulticastEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlMulticastEnable.setDescription('enable or disable the storm control for multicast frames')
# NOTE(review): 'Unsigned_32' (with underscore) looks like a pysmi-converted
# vendor textual-convention name rather than the standard Unsigned32 —
# confirm it is defined/imported earlier in this generated module.
rlStormCtrlMulticastRate = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 9), Unsigned_32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlMulticastRate.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlMulticastRate.setDescription('set the storm control rate limit for the multicast frames, 0 indicate blocking of frames from this type.')
# Write-only style "reset to default" triggers, one per configurable item.
rlStormCtrlSetDefaultRateType = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 10), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlSetDefaultRateType.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlSetDefaultRateType.setDescription('indicate if return the rate unit type to its default.')
rlStormCtrlSetDefaultUnknownUnicastEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 11), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlSetDefaultUnknownUnicastEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlSetDefaultUnknownUnicastEnable.setDescription('indicate if return the storm control enable for unknown unicast frames to its default.')
rlStormCtrlSetDefaultUnknownUnicastRate = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 12), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlSetDefaultUnknownUnicastRate.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlSetDefaultUnknownUnicastRate.setDescription('indicate if return the storm control rate limit for the unknown unicast frames to its default.')
rlStormCtrlSetDefaultUnknownMulticastEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 13), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlSetDefaultUnknownMulticastEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlSetDefaultUnknownMulticastEnable.setDescription('indicate if return the storm control enable for unknown multicast frames to its default.')
rlStormCtrlSetDefaultUnknownMulticastRate = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 14), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlSetDefaultUnknownMulticastRate.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlSetDefaultUnknownMulticastRate.setDescription('indicate if return the storm control rate limit for the unknown multicast frames to its default.')
rlStormCtrlSetDefaultBroadcastEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 15), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlSetDefaultBroadcastEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlSetDefaultBroadcastEnable.setDescription('indicate if return the storm control enable for Broadcast frames to its default.')
rlStormCtrlSetDefaultBroadcastRate = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 16), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlSetDefaultBroadcastRate.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlSetDefaultBroadcastRate.setDescription('indicate if return the storm control rate limit for the Broadcast frames to its default.')
rlStormCtrlSetDefaultMulticastEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 17), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlSetDefaultMulticastEnable.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlSetDefaultMulticastEnable.setDescription('indicate if return the storm control for multicast frames to its default.')
rlStormCtrlSetDefaultMulticastRate = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 8, 1, 18), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlStormCtrlSetDefaultMulticastRate.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlSetDefaultMulticastRate.setDescription('indicate if return the storm control rate limit for the multicast frames to its default.')
# --- rlStormCtrlGroupTable (OID 1.3.6.1.4.1.89.77.9) ---
# Read-only table, indexed by dot1dBasePort (BRIDGE-MIB), reporting the
# storm-control group id per supported frame type; 0 means unsupported.
rlStormCtrlGroupTable = MibTable((1, 3, 6, 1, 4, 1, 89, 77, 9), )
if mibBuilder.loadTexts: rlStormCtrlGroupTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlGroupTable.setDescription('The table contains per port for each supported frame type to which group it belongs.')
rlStormCtrlGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 77, 9, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: rlStormCtrlGroupEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlGroupEntry.setDescription('group id for each supported frame type defined per port.')
rlStormCtrlGroupUnknownUnicastId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 9, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlStormCtrlGroupUnknownUnicastId.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlGroupUnknownUnicastId.setDescription('Indicates the id of the group for unknown unicast frame type that the port belongs to, 0 indicates that unknown unicast frame type is not supported.')
rlStormCtrlGroupUnknownMulticastId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 9, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlStormCtrlGroupUnknownMulticastId.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlGroupUnknownMulticastId.setDescription('Indicates the id of the group for unknown multicast frame type that the port belongs to, 0 indicates that unknown multicast frame type is not supported.')
rlStormCtrlGroupBroadcastId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 9, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlStormCtrlGroupBroadcastId.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlGroupBroadcastId.setDescription('Indicates the id of the group for broadcast frame type that the port belongs to, 0 indicates that broadcast frame type is not supported.')
rlStormCtrlGroupMulticastId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 77, 9, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlStormCtrlGroupMulticastId.setStatus('mandatory')
if mibBuilder.loadTexts: rlStormCtrlGroupMulticastId.setDescription('Indicates the id of the group for multicast frame type that the port belongs to, 0 indicates that multicast frame type is not supported.')
# --- Feature subtree anchors under enterprises.89 (RADLAN) ---
# Bare MibIdentifier nodes reserving OID branches 78-84 for the named
# feature areas; only rlTraceRoute gets a version scalar here.
rlSsh = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 78))
rlAAA = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 79))
rlRadius = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 80))
rlTraceRoute = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 81))
rlTraceRouteMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 81, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlTraceRouteMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rlTraceRouteMibVersion.setDescription("MIB's version, the current version is 1.")
rlSyslog = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 82))
rlEnv = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 83))
rlSmon = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 84))
# --- rlSmon / port-copy (mirroring) objects (OID 1.3.6.1.4.1.89.84.x) ---
rlPortCopyMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 84, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortCopyMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortCopyMibVersion.setDescription("MIB's version, the current version is 1.")
# Capability flag: enumerated supported(1)/notSupported(2).
rlPortCopySupport = MibScalar((1, 3, 6, 1, 4, 1, 89, 84, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("supported", 1), ("notSupported", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlPortCopySupport.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortCopySupport.setDescription('supported - The standard portCopy is supported. notSupported - the standard portCopy is not supported. only basic portCopy operation is supported. ')
# Supplement to the standard portCopyTable: per-destination-port vlan
# tagging choice, indexed by dot1dBasePort (BRIDGE-MIB).
rlPortCopyVlanTaggingTable = MibTable((1, 3, 6, 1, 4, 1, 89, 84, 3), )
if mibBuilder.loadTexts: rlPortCopyVlanTaggingTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortCopyVlanTaggingTable.setDescription('A supplementing table for portCopyTable. For every portCopyDest a vlan-tagging option is available.')
rlPortCopyVlanTaggingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 84, 3, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: rlPortCopyVlanTaggingEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortCopyVlanTaggingEntry.setDescription('Each entry specify how mirrored packets will transmit from the portCopyDest: Tagged or unTagged. The values in this entry will be valid only when the dot1dBasePort will be configured as a portCopyDest in the portCopyTable.')
rlPortCopyVlanTagging = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 84, 3, 1, 1), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlPortCopyVlanTagging.setStatus('mandatory')
if mibBuilder.loadTexts: rlPortCopyVlanTagging.setDescription('TRUE - Mirrored packets will transmit from portCopyDest - Tagged FALSE - Mirrored packets will transmit from portCopyDest - unTagged')
# --- rlSocket subtree (OID 1.3.6.1.4.1.89.85): open-socket inventory ---
# Read-only table of currently open sockets, indexed by rlSocketId
# (self-indexed via this same RADLAN-MIB module).
rlSocket = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 85))
rlSocketMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 85, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlSocketMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rlSocketMibVersion.setDescription("MIB's version, the current version is 1.")
rlSocketTable = MibTable((1, 3, 6, 1, 4, 1, 89, 85, 2), )
if mibBuilder.loadTexts: rlSocketTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlSocketTable.setDescription('The (conceptual) table listing the sockets which are currently open in the system.')
rlSocketEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 85, 2, 1), ).setIndexNames((0, "RADLAN-MIB", "rlSocketId"))
if mibBuilder.loadTexts: rlSocketEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlSocketEntry.setDescription('An entry (conceptual row) in the SocketTable.')
rlSocketId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 85, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlSocketId.setStatus('mandatory')
if mibBuilder.loadTexts: rlSocketId.setDescription('The value of the id of the socket. ')
# Enumerated columns mirroring the BSD socket model: type, connection
# state, and blocking mode.
rlSocketType = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 85, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("stream", 1), ("dgram", 2), ("raw", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlSocketType.setStatus('mandatory')
if mibBuilder.loadTexts: rlSocketType.setDescription('Specifies the type of the socket. ')
rlSocketState = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 85, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("connected", 1), ("notConnected", 2), ("recvClosed", 3), ("sendClosed", 4), ("closed", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlSocketState.setStatus('mandatory')
if mibBuilder.loadTexts: rlSocketState.setDescription('Specifies the state in which the socket is in. ')
rlSocketBlockMode = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 85, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("blocking", 1), ("nonBlocking", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlSocketBlockMode.setStatus('mandatory')
if mibBuilder.loadTexts: rlSocketBlockMode.setDescription('Specifies the blocking mode of the socket. ')
rlSocketUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 85, 2, 1, 5), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlSocketUpTime.setStatus('mandatory')
if mibBuilder.loadTexts: rlSocketUpTime.setDescription('The time elapsed since this socket was created.')
# --- rlDigitalKeyManage subtree (OID 1.3.6.1.4.1.89.86): MD5 key chains ---
# Two RowStatus-managed tables: rlMD5KeyChainTable maps chain names to
# key ids; rlMD5KeyTable holds the keys and their accept/generate windows.
rlDigitalKeyManage = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 86))
rlMD5KeyChainTable = MibTable((1, 3, 6, 1, 4, 1, 89, 86, 1), )
if mibBuilder.loadTexts: rlMD5KeyChainTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlMD5KeyChainTable.setDescription("Key-chains and key ID's")
rlMD5KeyChainEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 86, 1, 1), ).setIndexNames((0, "RADLAN-MIB", "rlMD5KeyChainName"), (0, "RADLAN-MIB", "rlMD5KeyChainKeyId"))
if mibBuilder.loadTexts: rlMD5KeyChainEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlMD5KeyChainEntry.setDescription('Key-chain with key ID that belongs to this chain')
rlMD5KeyChainName = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 86, 1, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMD5KeyChainName.setStatus('mandatory')
if mibBuilder.loadTexts: rlMD5KeyChainName.setDescription('Name of the key-chain to which belongs the secret authentication key')
rlMD5KeyChainKeyId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 86, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMD5KeyChainKeyId.setStatus('mandatory')
if mibBuilder.loadTexts: rlMD5KeyChainKeyId.setDescription('A 8-bit identifier for the secret authentication key. This identifier is the same as key ID used in rlMD5KeyTable.')
rlMD5KeyChainKeyRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 86, 1, 1, 3), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMD5KeyChainKeyRowStatus.setStatus('mandatory')
if mibBuilder.loadTexts: rlMD5KeyChainKeyRowStatus.setDescription('It is used to insert, update or delete an entry')
rlMD5KeyTable = MibTable((1, 3, 6, 1, 4, 1, 89, 86, 2), )
if mibBuilder.loadTexts: rlMD5KeyTable.setStatus('mandatory')
if mibBuilder.loadTexts: rlMD5KeyTable.setDescription('Secret authentication keys used by MD5 Authentication Algorithm')
rlMD5KeyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 86, 2, 1), ).setIndexNames((0, "RADLAN-MIB", "rlMD5KeyId"))
if mibBuilder.loadTexts: rlMD5KeyEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rlMD5KeyEntry.setDescription('Secret authentication key with all related parameters')
rlMD5KeyId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 86, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMD5KeyId.setStatus('mandatory')
if mibBuilder.loadTexts: rlMD5KeyId.setDescription('A 8-bit identifier for the secret authentication key')
# Write-once secret; reads return a zero-length string (per description).
rlMD5Key = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 86, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMD5Key.setStatus('mandatory')
if mibBuilder.loadTexts: rlMD5Key.setDescription('The 128-bit secret authentication key. The key can get value only once and cannot be modified. When read, rlMD5KeyChainKey always returns an Octet String of length zero.')
# Validity windows: start defaults to all-zero DateAndTime, stop to all-FF
# (i.e. effectively "never" until configured).
rlMD5KeyStartAccept = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 86, 2, 1, 3), DateAndTime().clone(hexValue="00000000")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMD5KeyStartAccept.setStatus('mandatory')
if mibBuilder.loadTexts: rlMD5KeyStartAccept.setDescription('The time that the router will start accepting packets that have been created with the given key')
rlMD5KeyStartGenerate = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 86, 2, 1, 4), DateAndTime().clone(hexValue="00000000")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMD5KeyStartGenerate.setStatus('mandatory')
if mibBuilder.loadTexts: rlMD5KeyStartGenerate.setDescription('The time that the router will start using the key for packet generation')
rlMD5KeyStopGenerate = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 86, 2, 1, 5), DateAndTime().clone(hexValue="FFFFFFFF")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMD5KeyStopGenerate.setStatus('mandatory')
if mibBuilder.loadTexts: rlMD5KeyStopGenerate.setDescription('The time that the router will stop using the key for packet generation')
rlMD5KeyStopAccept = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 86, 2, 1, 6), DateAndTime().clone(hexValue="FFFFFFFF")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMD5KeyStopAccept.setStatus('mandatory')
if mibBuilder.loadTexts: rlMD5KeyStopAccept.setDescription('The time that the router will stop accepting packets that have been created with the given key')
rlMD5KeyRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 86, 2, 1, 7), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMD5KeyRowStatus.setStatus('mandatory')
if mibBuilder.loadTexts: rlMD5KeyRowStatus.setDescription('It is used to insert, update or delete an entry')
# --- Subtree anchors 87-94 plus jumbo-frames and CDB status scalars ---
rlCopy = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 87))
rlQosCliMib = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 88))
rlMngInf = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 89))
rlPhy = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 90))
rlJumboFrames = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 91))
# Jumbo frames: current status is read-only; the after-reset setting is the
# writable knob (takes effect on next reboot, per the two descriptions).
rlJumboFramesCurrentStatus = MibScalar((1, 3, 6, 1, 4, 1, 89, 91, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlJumboFramesCurrentStatus.setStatus('mandatory')
if mibBuilder.loadTexts: rlJumboFramesCurrentStatus.setDescription('Show the current Jumbo Frames status')
rlJumboFramesStatusAfterReset = MibScalar((1, 3, 6, 1, 4, 1, 89, 91, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlJumboFramesStatusAfterReset.setStatus('mandatory')
if mibBuilder.loadTexts: rlJumboFramesStatusAfterReset.setDescription('Set the Jumbo Frames status after reset')
rlTimeSynchronization = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 92))
rlDnsCl = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 93))
rlCDB = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 94))
rlStartupCDBChanged = MibScalar((1, 3, 6, 1, 4, 1, 89, 94, 1), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlStartupCDBChanged.setStatus('mandatory')
if mibBuilder.loadTexts: rlStartupCDBChanged.setDescription("Indicates whether the startup CDB has changed between the router's last two reboots")
rlManualReboot = MibScalar((1, 3, 6, 1, 4, 1, 89, 94, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlManualReboot.setStatus('mandatory')
if mibBuilder.loadTexts: rlManualReboot.setDescription('Indicates whether the device was shutdown orderly before reboot or not (i.e. power failure)')
# --- rldot1x subtree (OID 1.3.6.1.4.1.89.95): 802.1x extensions ---
rldot1x = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 95))
rldot1xMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 95, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xMibVersion.setDescription("MIB's version, the current version is 1.")
rldot1xExtAuthSessionStatsTable = MibTable((1, 3, 6, 1, 4, 1, 89, 95, 2), )
if mibBuilder.loadTexts: rldot1xExtAuthSessionStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xExtAuthSessionStatsTable.setDescription('A table that contains the session statistics objects for the Authenticator PAE associated with each Port. An entry appears in this table for each port that may authenticate access to itself.')
# This row AUGMENTS dot1xAuthSessionStatsEntry (IEEE8021-PAE-MIB): pysmi
# registers the augmentation and copies the base row's index names.
rldot1xExtAuthSessionStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 95, 2, 1), )
dot1xAuthSessionStatsEntry.registerAugmentions(("RADLAN-MIB", "rldot1xExtAuthSessionStatsEntry"))
rldot1xExtAuthSessionStatsEntry.setIndexNames(*dot1xAuthSessionStatsEntry.getIndexNames())
if mibBuilder.loadTexts: rldot1xExtAuthSessionStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xExtAuthSessionStatsEntry.setDescription('The session statistics information for an Authenticator PAE. This shows the current values being collected for each session that is still in progress, or the final values for the last valid session on each port where there is no session currently active.')
rlDot1xAuthSessionAuthenticMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("remoteAuthServer", 1), ("localAuthServer", 2), ("none", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlDot1xAuthSessionAuthenticMethod.setStatus('mandatory')
if mibBuilder.loadTexts: rlDot1xAuthSessionAuthenticMethod.setDescription('The authentication method used to establish the session.')
# Guest-VLAN support: capability flag, VID (0 = none), and member ports.
rldot1xGuestVlanSupported = MibScalar((1, 3, 6, 1, 4, 1, 89, 95, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xGuestVlanSupported.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xGuestVlanSupported.setDescription('indicate if guest vlan is supported.')
rldot1xGuestVlanVID = MibScalar((1, 3, 6, 1, 4, 1, 89, 95, 4), VlanIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1xGuestVlanVID.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xGuestVlanVID.setDescription('specify the guest vlan tag , 0 for non exiting.')
rldot1xGuestVlanPorts = MibScalar((1, 3, 6, 1, 4, 1, 89, 95, 5), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1xGuestVlanPorts.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xGuestVlanPorts.setDescription('the ports that can be members in the guest vlan')
# Unauthenticated-VLAN table, indexed by dot1qFdbId (Q-BRIDGE-MIB).
rldot1xUnAuthenticatedVlanSupported = MibScalar((1, 3, 6, 1, 4, 1, 89, 95, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xUnAuthenticatedVlanSupported.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xUnAuthenticatedVlanSupported.setDescription('indicate if unauthenticated Vlan is supported.')
rldot1xUnAuthenticatedVlanTable = MibTable((1, 3, 6, 1, 4, 1, 89, 95, 7), )
if mibBuilder.loadTexts: rldot1xUnAuthenticatedVlanTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xUnAuthenticatedVlanTable.setDescription('port belong to vlan in all port authenticated state except force unauthenticated table')
rldot1xUnAuthenticatedVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 95, 7, 1), ).setIndexNames((0, "Q-BRIDGE-MIB", "dot1qFdbId"))
if mibBuilder.loadTexts: rldot1xUnAuthenticatedVlanEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xUnAuthenticatedVlanEntry.setDescription(' port belong to vlan in all port authenticated state except force unauthenticated entry')
rldot1xUnAuthenticatedVlanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 7, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1xUnAuthenticatedVlanStatus.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xUnAuthenticatedVlanStatus.setDescription('The row status variable, used according to row installation and removal conventions.')
rldot1xUserBasedVlanSupported = MibScalar((1, 3, 6, 1, 4, 1, 89, 95, 8), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xUserBasedVlanSupported.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xUserBasedVlanSupported.setDescription('indicate if user based Vlan is supported.')
rldot1xUserBasedVlanPorts = MibScalar((1, 3, 6, 1, 4, 1, 89, 95, 9), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1xUserBasedVlanPorts.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xUserBasedVlanPorts.setDescription('the ports that can be members in the user based vlan')
# Per-port authentication method (EAPOL / MAC-based), indexed by
# dot1xPaePortNumber (IEEE8021-PAE-MIB); default eapolOnly.
rldot1xAuthenticationPortTable = MibTable((1, 3, 6, 1, 4, 1, 89, 95, 10), )
if mibBuilder.loadTexts: rldot1xAuthenticationPortTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthenticationPortTable.setDescription('A table of system level information for each port supported by the Port Access Entity. An entry appears in this table for each port of this system.')
rldot1xAuthenticationPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 95, 10, 1), ).setIndexNames((0, "IEEE8021-PAE-MIB", "dot1xPaePortNumber"))
if mibBuilder.loadTexts: rldot1xAuthenticationPortEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthenticationPortEntry.setDescription('The Port number and mac method')
rldot1xAuthenticationPortMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("eapolOnly", 1), ("macAndEapol", 2), ("macOnly", 3))).clone('eapolOnly')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1xAuthenticationPortMethod.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthenticationPortMethod.setDescription('The value of the mac based authenication.')
# --- rldot1xAuthMultiStatsTable (OID 1.3.6.1.4.1.89.95.11) ---
# Per (port, source MAC) EAPOL frame counters for multisession 802.1x;
# indexed by two RADLAN-MIB columns defined below. setReference strings
# cite the relevant IEEE 802.1X clause (9.4.2 Authenticator Statistics).
rldot1xAuthMultiStatsTable = MibTable((1, 3, 6, 1, 4, 1, 89, 95, 11), )
if mibBuilder.loadTexts: rldot1xAuthMultiStatsTable.setReference('9.4.2 Authenticator Statistics')
if mibBuilder.loadTexts: rldot1xAuthMultiStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiStatsTable.setDescription('A table that contains the statistics objects for the Authenticator PAE associated with each Port and MAC for multisession 802.1x mode of operation. An entry appears in this table for each port and MAC that have an authentication session currently running under way for them.')
rldot1xAuthMultiStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 95, 11, 1), ).setIndexNames((0, "RADLAN-MIB", "rldot1xAuthMultiStatsPortNumber"), (0, "RADLAN-MIB", "rldot1xAuthMultiStatsSourceMac"))
if mibBuilder.loadTexts: rldot1xAuthMultiStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiStatsEntry.setDescription('The statistics information for an Authenticator PAE.')
# Index columns.
rldot1xAuthMultiStatsPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 11, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiStatsPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiStatsPortNumber.setDescription('Port Number.')
rldot1xAuthMultiStatsSourceMac = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 11, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiStatsSourceMac.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiStatsSourceMac.setDescription('Mac of the authentication session.')
# Read-only Counter32 columns, one per EAPOL frame category.
rldot1xAuthMultiEapolFramesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 11, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiEapolFramesRx.setReference('9.4.2, EAPOL frames received')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolFramesRx.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolFramesRx.setDescription('The number of valid EAPOL frames of any type that have been received by this Authenticator.')
rldot1xAuthMultiEapolFramesTx = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 11, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiEapolFramesTx.setReference('9.4.2, EAPOL frames transmitted')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolFramesTx.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolFramesTx.setDescription('The number of EAPOL frames of any type that have been transmitted by this Authenticator.')
rldot1xAuthMultiEapolStartFramesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 11, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiEapolStartFramesRx.setReference('9.4.2, EAPOL Start frames received')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolStartFramesRx.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolStartFramesRx.setDescription('The number of EAPOL Start frames that have been received by this Authenticator.')
rldot1xAuthMultiEapolLogoffFramesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 11, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiEapolLogoffFramesRx.setReference('9.4.2, EAPOL Logoff frames received')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolLogoffFramesRx.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolLogoffFramesRx.setDescription('The number of EAPOL Logoff frames that have been received by this Authenticator.')
rldot1xAuthMultiEapolRespIdFramesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 11, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiEapolRespIdFramesRx.setReference('9.4.2, EAPOL Resp/Id frames received')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolRespIdFramesRx.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolRespIdFramesRx.setDescription('The number of EAP Resp/Id frames that have been received by this Authenticator.')
rldot1xAuthMultiEapolRespFramesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 11, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiEapolRespFramesRx.setReference('9.4.2, EAPOL Response frames received')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolRespFramesRx.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolRespFramesRx.setDescription('The number of valid EAP Response frames (other than Resp/Id frames) that have been received by this Authenticator.')
rldot1xAuthMultiEapolReqIdFramesTx = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 11, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiEapolReqIdFramesTx.setReference('9.4.2, EAPOL Req/Id frames transmitted')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolReqIdFramesTx.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolReqIdFramesTx.setDescription('The number of EAP Req/Id frames that have been transmitted by this Authenticator.')
rldot1xAuthMultiEapolReqFramesTx = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 11, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiEapolReqFramesTx.setReference('9.4.2, EAPOL Request frames transmitted')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolReqFramesTx.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiEapolReqFramesTx.setDescription('The number of EAP Request frames (other than Rq/Id frames) that have been transmitted by this Authenticator.')
rldot1xAuthMultiInvalidEapolFramesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 11, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiInvalidEapolFramesRx.setReference('9.4.2, Invalid EAPOL frames received')
if mibBuilder.loadTexts: rldot1xAuthMultiInvalidEapolFramesRx.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiInvalidEapolFramesRx.setDescription('The number of EAPOL frames that have been received by this Authenticator in which the frame type is not recognized.')
rldot1xAuthMultiEapLengthErrorFramesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 11, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiEapLengthErrorFramesRx.setReference('9.4.2, EAP length error frames received')
if mibBuilder.loadTexts: rldot1xAuthMultiEapLengthErrorFramesRx.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiEapLengthErrorFramesRx.setDescription('The number of EAPOL frames that have been received by this Authenticator in which the Packet Body Length field is invalid.')
# --- rldot1xAuthMultiDiagTable (OID 1.3.6.1.4.1.89.95.12) ---
# Per (port, source MAC) Authenticator PAE state-machine transition
# counters (IEEE 802.1X clause 9.4.3 diagnostics), same two-part index
# scheme as the stats table above.
rldot1xAuthMultiDiagTable = MibTable((1, 3, 6, 1, 4, 1, 89, 95, 12), )
if mibBuilder.loadTexts: rldot1xAuthMultiDiagTable.setReference('9.4.3 Authenticator Diagnostics')
if mibBuilder.loadTexts: rldot1xAuthMultiDiagTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiDiagTable.setDescription('A table that contains the diagnostics objects for the Authenticator PAE associated with each Port and MAC. An entry appears in this table for each port and MAC that have an authentication session currently running under way for them.')
rldot1xAuthMultiDiagEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 95, 12, 1), ).setIndexNames((0, "RADLAN-MIB", "rldot1xAuthMultiDiagPortNumber"), (0, "RADLAN-MIB", "rldot1xAuthMultiDiagSourceMac"))
if mibBuilder.loadTexts: rldot1xAuthMultiDiagEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiDiagEntry.setDescription('The diagnostics information for an Authenticator PAE.')
# Index columns.
rldot1xAuthMultiDiagPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 12, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiDiagPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiDiagPortNumber.setDescription('Port Number.')
rldot1xAuthMultiDiagSourceMac = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 12, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiDiagSourceMac.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiDiagSourceMac.setDescription('Mac of the authentication session.')
# Read-only Counter32 columns, one per state-machine transition of interest.
rldot1xAuthMultiEntersConnecting = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 12, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiEntersConnecting.setReference('9.4.2, 8.5.4.2.1')
if mibBuilder.loadTexts: rldot1xAuthMultiEntersConnecting.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiEntersConnecting.setDescription('Counts the number of times that the state machine transitions to the CONNECTING state from any other state.')
rldot1xAuthMultiEntersAuthenticating = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 12, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiEntersAuthenticating.setReference('9.4.2, 8.5.4.2.3')
if mibBuilder.loadTexts: rldot1xAuthMultiEntersAuthenticating.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiEntersAuthenticating.setDescription('Counts the number of times that the state machine transitions from CONNECTING to AUTHENTICATING, as a result of an EAP-Response/Identity message being received from the Supplicant.')
rldot1xAuthMultiAuthSuccessWhileAuthenticating = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 12, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiAuthSuccessWhileAuthenticating.setReference('9.4.2, 8.5.4.2.4')
if mibBuilder.loadTexts: rldot1xAuthMultiAuthSuccessWhileAuthenticating.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiAuthSuccessWhileAuthenticating.setDescription('Counts the number of times that the state machine transitions from AUTHENTICATING to AUTHENTICATED, as a result of the Backend Authentication state machine indicating successful authentication of the Supplicant (authSuccess = TRUE).')
rldot1xAuthMultiAuthFailWhileAuthenticating = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 12, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiAuthFailWhileAuthenticating.setReference('9.4.2, 8.5.4.2.6')
if mibBuilder.loadTexts: rldot1xAuthMultiAuthFailWhileAuthenticating.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiAuthFailWhileAuthenticating.setDescription('Counts the number of times that the state machine transitions from AUTHENTICATING to HELD, as a result of the Backend Authentication state machine indicating authentication failure (authFail = TRUE).')
rldot1xAuthMultiAuthReauthsWhileAuthenticating = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 12, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiAuthReauthsWhileAuthenticating.setReference('9.4.2, 8.5.4.2.7')
if mibBuilder.loadTexts: rldot1xAuthMultiAuthReauthsWhileAuthenticating.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiAuthReauthsWhileAuthenticating.setDescription('Counts the number of times that the state machine transitions from AUTHENTICATING to ABORTING, as a result of a reauthentication request (reAuthenticate = TRUE).')
rldot1xAuthMultiAuthEapStartsWhileAuthenticating = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 12, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiAuthEapStartsWhileAuthenticating.setReference('9.4.2, 8.5.4.2.8')
if mibBuilder.loadTexts: rldot1xAuthMultiAuthEapStartsWhileAuthenticating.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiAuthEapStartsWhileAuthenticating.setDescription('Counts the number of times that the state machine transitions from AUTHENTICATING to ABORTING, as a result of an EAPOL-Start message being received from the Supplicant.')
rldot1xAuthMultiAuthReauthsWhileAuthenticated = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 12, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiAuthReauthsWhileAuthenticated.setReference('9.4.2, 8.5.4.2.10')
if mibBuilder.loadTexts: rldot1xAuthMultiAuthReauthsWhileAuthenticated.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiAuthReauthsWhileAuthenticated.setDescription('Counts the number of times that the state machine transitions from AUTHENTICATED to CONNECTING, as a result of a reauthentication request (reAuthenticate = TRUE).')
rldot1xAuthMultiAuthEapStartsWhileAuthenticated = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 12, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiAuthEapStartsWhileAuthenticated.setReference('9.4.2, 8.5.4.2.11')
if mibBuilder.loadTexts: rldot1xAuthMultiAuthEapStartsWhileAuthenticated.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiAuthEapStartsWhileAuthenticated.setDescription('Counts the number of times that the state machine transitions from AUTHENTICATED to CONNECTING, as a result of an EAPOL-Start message being received from the Supplicant.')
rldot1xAuthMultiBackendResponses = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 12, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiBackendResponses.setReference('9.4.2, 8.5.6.2.1')
if mibBuilder.loadTexts: rldot1xAuthMultiBackendResponses.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiBackendResponses.setDescription('Counts the number of times that the state machine sends an initial Access-Request packet to the Authentication server (i.e., executes sendRespToServer on entry to the RESPONSE state). Indicates that the Authenticator attempted communication with the Authentication Server.')
rldot1xAuthMultiBackendAccessChallenges = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 12, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiBackendAccessChallenges.setReference('9.4.2, 8.5.6.2.2')
if mibBuilder.loadTexts: rldot1xAuthMultiBackendAccessChallenges.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiBackendAccessChallenges.setDescription('Counts the number of times that the state machine receives an initial Access-Challenge packet from the Authentication server (i.e., aReq becomes TRUE, causing exit from the RESPONSE state). Indicates that the Authentication Server has communication with the Authenticator.')
rldot1xAuthMultiBackendOtherRequestsToSupplicant = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 12, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiBackendOtherRequestsToSupplicant.setReference('9.4.2, 8.5.6.2.3')
if mibBuilder.loadTexts: rldot1xAuthMultiBackendOtherRequestsToSupplicant.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiBackendOtherRequestsToSupplicant.setDescription('Counts the number of times that the state machine sends an EAP-Request packet (other than an Identity, Notification, Failure or Success message) to the Supplicant (i.e., executes txReq on entry to the REQUEST state). Indicates that the Authenticator chose an EAP-method.')
rldot1xAuthMultiBackendNonNakResponsesFromSupplicant = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 12, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiBackendNonNakResponsesFromSupplicant.setReference('9.4.2, 8.5.6.2.4')
if mibBuilder.loadTexts: rldot1xAuthMultiBackendNonNakResponsesFromSupplicant.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiBackendNonNakResponsesFromSupplicant.setDescription('Counts the number of times that the state machine receives a response from the Supplicant to an initial EAP-Request, and the response is something other than EAP-NAK (i.e., rxResp becomes TRUE, causing the state machine to transition from REQUEST to RESPONSE, and the response is not an EAP-NAK). Indicates that the Supplicant can respond to the Authenticators chosen EAP-method.')
rldot1xAuthMultiBackendAuthSuccesses = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 12, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiBackendAuthSuccesses.setReference('9.4.2, 8.5.6.2.5')
if mibBuilder.loadTexts: rldot1xAuthMultiBackendAuthSuccesses.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiBackendAuthSuccesses.setDescription('Counts the number of times that the state machine receives an EAP-Success message from the Authentication Server (i.e., aSuccess becomes TRUE, causing a transition from RESPONSE to SUCCESS). Indicates that the Supplicant has successfully authenticated to the Authentication Server.')
# --- rldot1xAuthMultiSessionStats table (OID 1.3.6.1.4.1.89.95.13) ---
# Per-session 802.1X statistics, indexed by (port number, source MAC) as
# declared by setIndexNames below.  All columns are read-only; octet counters
# use Counter64, frame counters Counter32, and the session id / user name are
# SnmpAdminString.  Auto-generated pysnmp codegen output.
rldot1xAuthMultiSessionStatsTable = MibTable((1, 3, 6, 1, 4, 1, 89, 95, 13), )
if mibBuilder.loadTexts: rldot1xAuthMultiSessionStatsTable.setReference('9.4.4')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionStatsTable.setDescription('A table that contains the session statistics objects for the Authenticator PAE associated with each Port. An entry appears in this table for each port that may authenticate access to itself.')
rldot1xAuthMultiSessionStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 95, 13, 1), ).setIndexNames((0, "RADLAN-MIB", "rldot1xAuthMultiSessionStatsPortNumber"), (0, "RADLAN-MIB", "rldot1xAuthMultiSessionStatsSourceMac"))
if mibBuilder.loadTexts: rldot1xAuthMultiSessionStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionStatsEntry.setDescription('The session statistics information for an Authenticator PAE. This shows the mandatory values being collected for each session that is still in progress, or the final values for the last valid session on each port where there is no session currently active.')
# Index columns (.1 port, .2 MAC).
rldot1xAuthMultiSessionStatsPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 13, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiSessionStatsPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionStatsPortNumber.setDescription('Port Number.')
rldot1xAuthMultiSessionStatsSourceMac = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 13, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiSessionStatsSourceMac.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionStatsSourceMac.setDescription('Mac of the authentication session.')
# Data columns (.3 - .9): traffic counters and session metadata.
rldot1xAuthMultiSessionOctetsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 13, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiSessionOctetsRx.setReference('9.4.4, Session Octets Received')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionOctetsRx.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionOctetsRx.setDescription('The number of octets received in user data frames on this Port during the session.')
rldot1xAuthMultiSessionOctetsTx = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 13, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiSessionOctetsTx.setReference('9.4.4, Session Octets Transmitted')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionOctetsTx.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionOctetsTx.setDescription('The number of octets transmitted in user data frames on this Port during the session.')
rldot1xAuthMultiSessionFramesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 13, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiSessionFramesRx.setReference('9.4.4, Session Frames Received')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionFramesRx.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionFramesRx.setDescription('The number of user data frames received on this Port during the session.')
rldot1xAuthMultiSessionFramesTx = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 13, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiSessionFramesTx.setReference('9.4.4, Session Frames Transmitted')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionFramesTx.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionFramesTx.setDescription('The number of user data frames transmitted on this Port during the session.')
rldot1xAuthMultiSessionId = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 13, 1, 7), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiSessionId.setReference('9.4.4, Session Identifier')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionId.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionId.setDescription('A unique identifier for the session, in the form of a printable ASCII string of at least three characters.')
rldot1xAuthMultiSessionTime = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 13, 1, 8), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiSessionTime.setReference('9.4.4, Session Time')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionTime.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionTime.setDescription('The duration of the session in seconds.')
rldot1xAuthMultiSessionUserName = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 13, 1, 9), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiSessionUserName.setReference('9.4.4, Session User Name')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionUserName.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiSessionUserName.setDescription('The User-Name representing the identity of the Supplicant PAE.')
# --- rldot1xAuthMultiConfig table (OID 1.3.6.1.4.1.89.95.14) ---
# Per-(port, source-MAC) 802.1X Authenticator state/configuration view:
# PAE state machine value, Backend Authentication state, and controlled-port
# status.  Indexed by the two columns named in setIndexNames below; all
# columns read-only.  Auto-generated pysnmp codegen output.
rldot1xAuthMultiConfigTable = MibTable((1, 3, 6, 1, 4, 1, 89, 95, 14), )
if mibBuilder.loadTexts: rldot1xAuthMultiConfigTable.setReference('9.4.1 Authenticator Configuration')
if mibBuilder.loadTexts: rldot1xAuthMultiConfigTable.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiConfigTable.setDescription('A table that contains the configuration objects for the Authenticator PAE associated with each port and MAC. An entry appears in this table for each port and MAC that may authenticate access to itself.')
rldot1xAuthMultiConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 95, 14, 1), ).setIndexNames((0, "RADLAN-MIB", "rldot1xAuthMultiPortNumber"), (0, "RADLAN-MIB", "rldot1xAuthMultiSourceMac"))
if mibBuilder.loadTexts: rldot1xAuthMultiConfigEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiConfigEntry.setDescription('The configuration information for an Authenticator PAE.')
# Index columns (.1 port, .2 MAC).
rldot1xAuthMultiPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 14, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiPortNumber.setDescription('Port Number.')
rldot1xAuthMultiSourceMac = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 14, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiSourceMac.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiSourceMac.setDescription('Mac of the authentication session.')
# .3: enumerated Authenticator PAE state (initialize..forceUnauth).
rldot1xAuthMultiPaeState = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 14, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("initialize", 1), ("disconnected", 2), ("connecting", 3), ("authenticating", 4), ("authenticated", 5), ("aborting", 6), ("held", 7), ("forceAuth", 8), ("forceUnauth", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiPaeState.setReference('9.4.1, Authenticator PAE state')
if mibBuilder.loadTexts: rldot1xAuthMultiPaeState.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiPaeState.setDescription('The current value of the Authenticator PAE state machine.')
# .4: enumerated Backend Authentication state (request..initialize).
rldot1xAuthMultiBackendAuthState = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 14, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("request", 1), ("response", 2), ("success", 3), ("fail", 4), ("timeout", 5), ("idle", 6), ("initialize", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiBackendAuthState.setReference('9.4.1, Backend Authentication state')
if mibBuilder.loadTexts: rldot1xAuthMultiBackendAuthState.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiBackendAuthState.setDescription('The current state of the Backend Authentication state machine.')
# .5: controlled-port status, typed by the PaeControlledPortStatus TC
# (textual convention imported elsewhere in this module).
rldot1xAuthMultiControlledPortStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 95, 14, 1, 5), PaeControlledPortStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rldot1xAuthMultiControlledPortStatus.setReference('9.4.1, AuthControlledPortStatus')
if mibBuilder.loadTexts: rldot1xAuthMultiControlledPortStatus.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xAuthMultiControlledPortStatus.setDescription('The current value of the controlled Port status parameter for the Port.')
# Read-write TruthValue scalar (OID ...89.95.15): controls whether 802.1x
# BPDU packets are filtered or bridged while 802.1x is globally disabled.
rldot1xBpduFilteringEnabled = MibScalar((1, 3, 6, 1, 4, 1, 89, 95, 15), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rldot1xBpduFilteringEnabled.setStatus('mandatory')
if mibBuilder.loadTexts: rldot1xBpduFilteringEnabled.setDescription('Specify that when 802.1x is globally disabled, 802.1x BPDU packets would be filtered or bridged.')
# --- RADLAN private subtree anchors under enterprise OID 1.3.6.1.4.1.89 ---
# Bare MibIdentifier nodes: group anchors for feature sub-MIBs whose objects
# are defined in other modules.  The arc numbers are not contiguous (e.g. 99
# and 122 are absent here); note the lone lowercase-'dl' dlPrivate at arc 119.
rlFile = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 96))
rlAAAEap = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 97))
rlSNMP = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 98))
rlSsl = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 100))
rlMacBasePrio = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 101))
rlWlanAccessPoint = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 102))
rlLocalization = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 103))
rlRs232 = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 104))
rlNicRedundancy = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 105))
rlAmap = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 106))
rlStack = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 107))
rlPoe = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 108))
rlUPnP = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 109))
rlLldp = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 110))
rlOib = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 111))
rlBridgeSecurity = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 112))
rlDhcpSpoofing = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 113))
rlBonjour = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 114))
rlLinksysSmartMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 115))
rlBrgMulticast = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 116))
rlBrgMcMngr = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 117))
rlGlobalIpAddrTable = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 118))
dlPrivate = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 119))
rlSecuritySuiteMib = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 120))
rlIntel = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 121))
rlAutoUpdate = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 123))
rlCpuCounters = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 124))
rlWlanMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 200))
rndEndOfMibGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 1000))
# End-of-MIB sentinel scalar at ...89.1000.1 -- marks the last object of the
# RND private MIB (per its own description below).
rndEndOfMib = MibScalar((1, 3, 6, 1, 4, 1, 89, 1000, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rndEndOfMib.setStatus('mandatory')
if mibBuilder.loadTexts: rndEndOfMib.setDescription(' This variable indicates this is the end of RND MIB.')
# --- Enterprise-specific notifications, part 1 (trap numbers 3..74) ---
# Each NotificationType OID is built as the enterprise prefix plus (0, N) --
# this matches the SMIv1-to-SMIv2 trap-OID mapping of appending .0.N, TODO
# confirm against RFC 3584.  Every trap binds rndErrorDesc and
# rndErrorSeverity from RADLAN-MIB as its variable objects.
# NOTE(review): several description strings carry typos verbatim from the
# vendor MIB ('Automantic', 'returened', 'Voilation', 'comfiguration
# completetd succssefully', ...).  They are runtime literals loaded by
# loadTexts and are deliberately left unchanged here.
rxOverflowHWFault = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,3)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rxOverflowHWFault.setDescription('An RX buffer overflow has occurred in one of the LAN or link interfaces. The bound variable rndErrorDesc provides the interface number.')
txOverflowHWFault = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,4)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: txOverflowHWFault.setDescription('Interport queue overflow has occurred in one of the LAN or link interfaces. The bound variable rndErrorDesc provides the interface number.')
routeTableOverflow = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,5)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: routeTableOverflow.setDescription('An overflow condition has occurred in the Routing Table. The Routing Table is used for IP routing algorithm (RIP).')
resetRequired = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,10)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: resetRequired.setDescription('This trap indicates that in order to perform the last SET request, a reset operation of the router/bridge is required. This occurs when the layer 2 routing algorithm is changed between SPF and Spanning Tree. The reset can be performed manually or using the variable rndAction.')
# TFTP transfer lifecycle traps (12..14).
endTftp = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,12)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: endTftp.setDescription('This trap indicates that in the device finished a TFTP transaction with the management station. variable rndErrorDesc and rndErrorSeverity provides the actual message text and severity respectively.')
abortTftp = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,13)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: abortTftp.setDescription('This trap indicates that in the device aborted a TFTP session with the management station. Variable rndErrorDesc and rndErrorSeverity provides the actual message text and severity respectively.')
startTftp = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,14)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: startTftp.setDescription('Informational trap indicating that the device has intiated a TFTP session. rndErrorDesc will contain the file type in question')
# Main/backup link redundancy traps (23..24).
faultBackUp = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,23)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: faultBackUp.setDescription('Automantic switchover to backup link because of main link fault.')
mainLinkUp = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,24)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: mainLinkUp.setDescription('Communication returened to main link.')
# IPX table overflow traps (36..37).
ipxRipTblOverflow = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,36)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: ipxRipTblOverflow.setDescription('This trap indicates that in an OpenGate IPX RIP table overflow. The bound variable rndErrorDesc, rndErrorSeverity provides the actual message text and severity respectively.')
ipxSapTblOverflow = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,37)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: ipxSapTblOverflow.setDescription('This trap indicates that in an OpenGate IPX SAP table overflow. The bound variable rndErrorDesc, rndErrorSeverity provides the actual message text and severity respectively.')
facsAccessVoilation = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,49)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: facsAccessVoilation.setDescription('This trap indicates that message that fits FACS statenebt with operation blockAndReport was forward to the interface. The bound variable rndErrorDesc, rndErrorSeverity(== info ) and interface Number.')
autoConfigurationCompleted = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,50)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: autoConfigurationCompleted.setDescription('This trap indicates that auto comfiguration completetd succssefully. The bound variable rndErrorDesc, rndErrorSeverity(== info )')
forwardingTabOverflow = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,51)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: forwardingTabOverflow.setDescription('This trap indicates that an overflow condition has occurred in the layer II Forward Table. The bound variable rndErrorDesc, rndErrorSeverity(== warning )')
# Frame-relay switch connection traps (53..54).
framRelaySwitchConnectionUp = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,53)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: framRelaySwitchConnectionUp.setDescription('This trap indicates that a connection establish between the Frame relay Switch and the WanGate. The bound variable rndErrorDesc, rndErrorSeverity(== warning )')
framRelaySwitchConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,54)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: framRelaySwitchConnectionDown.setDescription('This trap indicates that a connection between the Frame Relay Switch and the WanGate failed. The bound variable rndErrorDesc, rndErrorSeverity(== warning )')
errorsDuringInit = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,61)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: errorsDuringInit.setDescription('This trap indicates that the an error occured during initialization The bound variable rndErrorDesc, rndErrorSeverity(== error )')
# Dynamic VLAN membership traps (66..67) -- empty descriptions in the MIB.
vlanDynPortAdded = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,66)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: vlanDynPortAdded.setDescription('')
vlanDynPortRemoved = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,67)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: vlanDynPortRemoved.setDescription('')
rsSDclientsTableOverflow = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,68)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rsSDclientsTableOverflow.setDescription('This warning is generated when an overflow occurs in the clients table.')
rsSDinactiveServer = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,69)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rsSDinactiveServer.setDescription('This warning is generated when a server does not respond to the dispatchers polling and is thought to be inactive.')
# Zero Hop Routing (ZHR) traps (70..73).
rsIpZhrConnectionsTableOverflow = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,70)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rsIpZhrConnectionsTableOverflow.setDescription('The Zero Hop Routing connections Table has been overflown.')
rsIpZhrReqStaticConnNotAccepted = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,71)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rsIpZhrReqStaticConnNotAccepted.setDescription('The requested static connection was not accepted because there is no available IP virtual address to allocate to it.')
rsIpZhrVirtualIpAsSource = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,72)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rsIpZhrVirtualIpAsSource.setDescription('The virtual IP address appeared as a source IP. All the connections using it will be deleted and it will not be further allocated to new connections.')
rsIpZhrNotAllocVirtualIp = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,73)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rsIpZhrNotAllocVirtualIp.setDescription('The source IP address sent an ARP specifying a virtual IP which was not allocated for this source. This virtual IP will not be allocated to connections of this specific source IP.')
rsSnmpSetRequestInSpecialCfgState = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,74)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rsSnmpSetRequestInSpecialCfgState.setDescription('An incoming SNMP SET request was rejected because no such requests (except action requests) are accepted after start of new configuration reception or during sending the current configuration to an NMS.')
# --- Enterprise-specific notifications, part 2 (trap numbers 136..170) ---
# Same pattern as above: enterprise prefix + (0, N), with rndErrorDesc and
# rndErrorSeverity bound; the two STP traps additionally bind the ifIndex
# and VID trap variables.  Description typos ('Statud', 'decription',
# 'diffrent') are verbatim vendor-MIB text, intentionally untouched.
rsPingCompletion = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,136)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rsPingCompletion.setDescription('A rsPingCompleted trap is sent at the completion of a sequence of pings if such a trap was requested when the sequence was initiated. The trap severity is info. The trap text will specify the following information: rsPingCompletionStatus, rsPingSentPackets, rsPingReceivedPackets In addition to the above listed objects (which are always present), the message will also specify the following quantities: if any responses were received: rsPingMinReturnTime rsPingAvgReturnTime rsPingMaxReturnTime')
# PPP / WAN security and link traps (137..141); 138..140 have empty
# descriptions in the MIB.
pppSecurityViolation = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,137)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: pppSecurityViolation.setDescription('This trap indicates that a PPP link got an unrecognized secret. The bound variables rndErrorDesc, rndErrorSeverity(== warning ), interface Number. and pppSecurityIdentity')
frDLCIStatudChange = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,138)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: frDLCIStatudChange.setDescription('')
papFailedCommunication = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,139)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: papFailedCommunication.setDescription('')
chapFailedCommunication = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,140)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: chapFailedCommunication.setDescription('')
rsWSDRedundancySwitch = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,141)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rsWSDRedundancySwitch.setDescription('Whenever main server fails and backup takes over or server comes up after failure a trap of this type is issued.')
rsDhcpAllocationFailure = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,142)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rsDhcpAllocationFailure.setDescription('DHCP failed to allocate an IP address to a requesting host because of memory shortage or inadequate configuration of available IP addresses.')
# Fast-forwarding table (FFT) overflow traps (145..149).
rlIpFftStnOverflow = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,145)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlIpFftStnOverflow.setDescription('The IP SFFT overflow.')
rlIpFftSubOverflow = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,146)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlIpFftSubOverflow.setDescription('The IP NFFT overflow.')
rlIpxFftStnOverflow = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,147)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlIpxFftStnOverflow.setDescription('The IPX SFFT overflow.')
rlIpxFftSubOverflow = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,148)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlIpxFftSubOverflow.setDescription('The IPX NFFT overflow.')
rlIpmFftOverflow = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,149)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlIpmFftOverflow.setDescription('The IPM FFT overflow.')
rlPhysicalDescriptionChanged = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,150)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlPhysicalDescriptionChanged.setDescription('Indicates that the physical decription of the device has changed')
# STP port-state traps (151..152): bind two extra trap variables for the
# interface index and VLAN id.
rldot1dStpPortStateForwarding = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,151)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"), ("RADLAN-MIB", "rldot1dStpTrapVrblifIndex"), ("RADLAN-MIB", "rldot1dStpTrapVrblVID"))
if mibBuilder.loadTexts: rldot1dStpPortStateForwarding.setDescription('The trap is sent by a bridge when any of its configured ports transitions from the Learning state to the Forwarding state.')
rldot1dStpPortStateNotForwarding = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,152)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"), ("RADLAN-MIB", "rldot1dStpTrapVrblifIndex"), ("RADLAN-MIB", "rldot1dStpTrapVrblVID"))
if mibBuilder.loadTexts: rldot1dStpPortStateNotForwarding.setDescription('The trap is sent by a bridge when any of its configured ports transitions from the Forwarding state to the Blocking state.')
# Policy-engine packet traps (153..154).
rlPolicyDropPacketTrap = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,153)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlPolicyDropPacketTrap.setDescription('Indicates that the packet drop due to the policy ')
rlPolicyForwardPacketTrap = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,154)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlPolicyForwardPacketTrap.setDescription('Indicates that the packet has forward based on policy')
# IGMP / VRRP / hot-swap traps (156..159).
rlIgmpProxyTableOverflow = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,156)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlIgmpProxyTableOverflow.setDescription('An IGMP PROXY Table overflow.')
rlIgmpV1MsgReceived = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,157)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlIgmpV1MsgReceived.setDescription('An IGMP Message of v1 received on ifIndex. ')
rlVrrpEntriesDeleted = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,158)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlVrrpEntriesDeleted.setDescription('One or more VRRP entries deleted due to IP interface deletion or transition. ')
rlHotSwapTrap = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,159)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlHotSwapTrap.setDescription('Hot swap trap.')
# Trunk membership traps (160..162).
rlTrunkPortAddedTrap = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,160)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlTrunkPortAddedTrap.setDescription('Informational trap indicating that a port is added to a trunk')
rlTrunkPortRemovedTrap = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,161)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlTrunkPortRemovedTrap.setDescription('This warning is generated when a port removed from a trunk.')
rlTrunkPortNotCapableTrap = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,162)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlTrunkPortNotCapableTrap.setDescription('Informational trap indicating that a port can not be added to a trunk because of device limitations or diffrent swIfType.')
rlLockPortTrap = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,170)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlLockPortTrap.setDescription('Informational trap indicating that a locked port receive a frame with new source Mac Address.')
vlanDynVlanAdded = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,171)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: vlanDynVlanAdded.setDescription('add gvrp dynamic vlan')
vlanDynVlanRemoved = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,172)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: vlanDynVlanRemoved.setDescription('remove gvrp dynamic vlan')
vlanDynamicToStatic = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,173)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: vlanDynamicToStatic.setDescription('vlan status was changed from dynamic to static')
vlanStaticToDynamic = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,174)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: vlanStaticToDynamic.setDescription('vlan status was changed from static to dynamic')
dstrSysLog = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,175)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: dstrSysLog.setDescription('Master receive trap from slave , and forward it as trap')
rlEnvMonFanStateChange = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,176)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlEnvMonFanStateChange.setDescription('Trap indicating the fan state. rndErrorSeverity will be: 0 for fan state nomal, notPresent 1 for fan state warning, notFunctioning 2 for fan state critical 3 for fan state fatal The error text will specify the fan index, fan description and the exact fan state')
rlEnvMonPowerSupplyStateChange = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,177)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlEnvMonPowerSupplyStateChange.setDescription('Trap indicating the power supply state. rndErrorSeverity will be: 0 for power supply state nomal, notPresent 1 for power supply state warning, notFunctioning 2 for power supply state critical 3 for power supply state fatal The error text will specify the power supply index, power supply description and the exact power supply state')
rlStackStateChange = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,178)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlStackStateChange.setDescription('Trap indicating the stack connection state 0 for stack state connected, 1 for stack state disconnected ')
rlEnvMonTemperatureRisingAlarm = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,179)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlEnvMonTemperatureRisingAlarm.setDescription('Trap indicating that the temperature in the device has exceeded the device specific safe temperature threshold.')
rlBrgMacAddFailedTrap = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,183)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlBrgMacAddFailedTrap.setDescription('Informational trap indicating that adding dynamic mac/s failed due to full hash chain.')
rldot1xPortStatusAuthorizedTrap = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,184)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rldot1xPortStatusAuthorizedTrap.setDescription('Informational trap indicating that port 802.1x status is authorized.')
rldot1xPortStatusUnauthorizedTrap = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,185)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rldot1xPortStatusUnauthorizedTrap.setDescription('Warning trap is indicating that port 802.1x status is unAuthorized.')
swIfTablePortLock = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,192)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: swIfTablePortLock.setDescription('Warning trap is indicating port lock hase began.')
swIfTablePortUnLock = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,193)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: swIfTablePortUnLock.setDescription('Warning trap is indicating port lock has ended.')
rlAAAUserLocked = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,194)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rlAAAUserLocked.setDescription('Warning trap indicating that the user was locked after number of consecutives unsuccessful login attempts.')
bpduGuardPortSuspended = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,202)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: bpduGuardPortSuspended.setDescription('Warning trap indicating - Port was suspended because there was BPDU Guard violation.')
rldot1xSupplicantMacAuthorizedTrap = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,203)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rldot1xSupplicantMacAuthorizedTrap.setDescription('Informational trap indicating that MAC authentication supplicant is authenticated and is allowed to access the network.')
rldot1xSupplicantMacUnauthorizedTrap = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,204)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: rldot1xSupplicantMacUnauthorizedTrap.setDescription('Warning trap is indicating that Radius server rejects MAC authentication supplicant.')
stpLoopbackDetection = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,205)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: stpLoopbackDetection.setDescription('Warning trap indicating - A loopback was detected on port.')
stpLoopbackDetectionResolved = NotificationType((1, 3, 6, 1, 4, 1, 89) + (0,206)).setObjects(("RADLAN-MIB", "rndErrorDesc"), ("RADLAN-MIB", "rndErrorSeverity"))
if mibBuilder.loadTexts: stpLoopbackDetectionResolved.setDescription('Warning trap indicating - A loopback detection resolved on port.')
# Export the module's symbols so other compiled MIB modules can import them
# by name via mibBuilder.importSymbols("RADLAN-MIB", ...). The export list
# is split across multiple exportSymbols() calls further below, presumably
# because of Python's 255-argument limit in older interpreters -- generated
# output; do not hand-edit individual entries.
mibBuilder.exportSymbols("RADLAN-MIB", rlLockPortTrap=rlLockPortTrap, rldot1dStpPortBpduSent=rldot1dStpPortBpduSent, rlTrunkPortNotCapableTrap=rlTrunkPortNotCapableTrap, rldot1xAuthMultiSessionUserName=rldot1xAuthMultiSessionUserName, rldot1sMstpPendingRevisionLevel=rldot1sMstpPendingRevisionLevel, rldot1xAuthMultiInvalidEapolFramesRx=rldot1xAuthMultiInvalidEapolFramesRx, rlPhysicalDescriptionChanged=rlPhysicalDescriptionChanged, rldot1sMstpRevisionLevel=rldot1sMstpRevisionLevel, rlIpxFftStnOverflow=rlIpxFftStnOverflow, rldot1dStpPortLoopback=rldot1dStpPortLoopback, rldot1xAuthMultiSessionOctetsTx=rldot1xAuthMultiSessionOctetsTx, rlPortGmrpLeaveAllTime=rlPortGmrpLeaveAllTime, rlDhcpApprovalWaitingMask=rlDhcpApprovalWaitingMask, rldot1xMibVersion=rldot1xMibVersion, rldot1sMstpExtPortEntry=rldot1sMstpExtPortEntry, rlIgmpV1MsgReceived=rlIgmpV1MsgReceived, rlPortGvrpStatisticsSJE=rlPortGvrpStatisticsSJE, rlPortGmrpOverrideGarp=rlPortGmrpOverrideGarp, rlDhcpClActionTable=rlDhcpClActionTable, rlTelnetUsersTable=rlTelnetUsersTable, rldot1sMstpExtPortInternalAdminPathCost=rldot1sMstpExtPortInternalAdminPathCost, rldot1dStpPortBpduGuardEntry=rldot1dStpPortBpduGuardEntry, rldot1dTpAgingTime=rldot1dTpAgingTime, rldot1xAuthMultiSessionFramesRx=rldot1xAuthMultiSessionFramesRx, rlLinksysSmartMIB=rlLinksysSmartMIB, rndSoftwareFile=rndSoftwareFile, rlMD5KeyChainEntry=rlMD5KeyChainEntry, rldot1xAuthMultiSessionOctetsRx=rldot1xAuthMultiSessionOctetsRx, rldot1sMstpInstancePortDesignatedPort=rldot1sMstpInstancePortDesignatedPort, rlStormCtrlSetDefaultUnknownUnicastEnable=rlStormCtrlSetDefaultUnknownUnicastEnable, rlLldp=rlLldp, rlMirVlanBaseReservedPortsEntry=rlMirVlanBaseReservedPortsEntry, rldot1wRStpVlanEdgePortTable=rldot1wRStpVlanEdgePortTable, rlSyslog=rlSyslog, rldot1xAuthMultiControlledPortStatus=rldot1xAuthMultiControlledPortStatus, rlPortGvrpRegistrationModeEntry=rlPortGvrpRegistrationModeEntry, rldot1xAuthenticationPortTable=rldot1xAuthenticationPortTable, 
rlTimeSynchronization=rlTimeSynchronization, rlDnsCl=rlDnsCl, rndImageInfoEntry=rndImageInfoEntry, rlPortGvrpApplicantStatusEntry=rlPortGvrpApplicantStatusEntry, rldot1sMstpInstancePortDesignatedBridge=rldot1sMstpInstancePortDesignatedBridge, rldot1xAuthMultiStatsSourceMac=rldot1xAuthMultiStatsSourceMac, rlPortGvrpStatisticsRLE=rlPortGvrpStatisticsRLE, faultBackUp=faultBackUp, rldot1dStpVlanPortTable=rldot1dStpVlanPortTable, rldot1sMstpRegionalRoot=rldot1sMstpRegionalRoot, rlTelnetSessionStatus=rlTelnetSessionStatus, rndBridgeType=rndBridgeType, rldot1wRStpVlanEdgePortEntry=rldot1wRStpVlanEdgePortEntry, rldot1dExtBaseMibVersion=rldot1dExtBaseMibVersion, rlEmbWeb=rlEmbWeb, mainLinkUp=mainLinkUp, rlPolicyForwardPacketTrap=rlPolicyForwardPacketTrap, rlBrgMacSwSpanningTree=rlBrgMacSwSpanningTree, rldot1dStpPortAutoEdgePort=rldot1dStpPortAutoEdgePort, rlPortCopyMibVersion=rlPortCopyMibVersion, rlStormCtrlTable=rlStormCtrlTable, rndBootPRelaySecThreshold=rndBootPRelaySecThreshold, rndImage1Version=rndImage1Version, rlPortGvrpErrorStatisticsInvPlen=rlPortGvrpErrorStatisticsInvPlen, rlGmrpMibVersion=rlGmrpMibVersion, rndEndOfMib=rndEndOfMib, rlStormCtrlBroadcastEnable=rlStormCtrlBroadcastEnable, rlCliFileEnable=rlCliFileEnable, rldot1xAuthMultiAuthSuccessWhileAuthenticating=rldot1xAuthMultiAuthSuccessWhileAuthenticating, rlAggregateVlanEntry=rlAggregateVlanEntry, rlDot3adAgg=rlDot3adAgg, rldot1dStpHelloTime=rldot1dStpHelloTime, rlStormCtrlGroupEntry=rlStormCtrlGroupEntry, rlDhcpApprovalActionApprove=rlDhcpApprovalActionApprove, rlStormCtrlGroupTable=rlStormCtrlGroupTable, rlCDB=rlCDB, rldot1xAuthMultiEapolFramesTx=rldot1xAuthMultiEapolFramesTx, rldot1dStpSeparatedBridgesEnable=rldot1dStpSeparatedBridgesEnable, vlanStaticToDynamic=vlanStaticToDynamic, rldot1sMstpVlan=rldot1sMstpVlan, rldot1xAuthenticationPortMethod=rldot1xAuthenticationPortMethod, rldot1xAuthMultiEapolRespFramesRx=rldot1xAuthMultiEapolRespFramesRx, rldot1xAuthMultiPaeState=rldot1xAuthMultiPaeState, 
rldot1sMstpInstancePortTable=rldot1sMstpInstancePortTable, rlAAAUserLocked=rlAAAUserLocked, rlPortGvrpStatisticsSEmp=rlPortGvrpStatisticsSEmp, rlTelnetSessionClientAddress=rlTelnetSessionClientAddress, rldot1xAuthMultiPortNumber=rldot1xAuthMultiPortNumber, rlIfInterfaces=rlIfInterfaces, rldot1dStpSeparatedBridgesAutoConfig=rldot1dStpSeparatedBridgesAutoConfig, rldot1dStpFilterBpdu=rldot1dStpFilterBpdu, rlArpSpoofingEntry=rlArpSpoofingEntry, rlEnvMonFanStateChange=rlEnvMonFanStateChange, rldot1dStpTimeSinceTopologyChange=rldot1dStpTimeSinceTopologyChange, rldot1dStpPortPort=rldot1dStpPortPort, rlStormCtrlSetDefaultUnknownMulticastEnable=rlStormCtrlSetDefaultUnknownMulticastEnable, rlDhcpClCommandAction=rlDhcpClCommandAction, rldot1xAuthMultiConfigTable=rldot1xAuthMultiConfigTable, rldot1xUnAuthenticatedVlanStatus=rldot1xUnAuthenticatedVlanStatus, rldot1dStpVlanPortDesignatedRoot=rldot1dStpVlanPortDesignatedRoot, rldot1dStpVlanPortDesignatedBridge=rldot1dStpVlanPortDesignatedBridge, rldot1sMstpInstancePortState=rldot1sMstpInstancePortState, rlStormCtrlRateSupportPerType=rlStormCtrlRateSupportPerType, rlMir=rlMir, rldot1dStpVlanPortPathCost=rldot1dStpVlanPortPathCost, rlManualReboot=rlManualReboot, rndManagedTime=rndManagedTime, rndImage2Date=rndImage2Date, rldot1sMstpRemainingHops=rldot1sMstpRemainingHops, rlMirInterfaceTable=rlMirInterfaceTable, rlPortGvrpErrorStatisticsInvAtyp=rlPortGvrpErrorStatisticsInvAtyp, rlAggregateVlanName=rlAggregateVlanName, rlStormCtrlSetDefaultUnknownMulticastRate=rlStormCtrlSetDefaultUnknownMulticastRate, rlTelnet=rlTelnet, rlUPnP=rlUPnP, rlTraceRouteMibVersion=rlTraceRouteMibVersion, rldot1xAuthMultiDiagTable=rldot1xAuthMultiDiagTable, rlBrgMacSwAdminTrapFrequency=rlBrgMacSwAdminTrapFrequency, rndBackupConfigurationEnabled=rndBackupConfigurationEnabled, rndCommunityStatus=rndCommunityStatus, rldot1dStpMonitorTime=rldot1dStpMonitorTime, rldot1dStpPortEntry=rldot1dStpPortEntry, rlPolicy=rlPolicy, rlSocketUpTime=rlSocketUpTime, 
rlStormCtrlGroupUnknownMulticastId=rlStormCtrlGroupUnknownMulticastId, rldot1sMstpRegionalRootCost=rldot1sMstpRegionalRootCost, rldot1sMstpInstancePortPort=rldot1sMstpInstancePortPort, rl3sw2swTables=rl3sw2swTables, abortTftp=abortTftp, rlSocketType=rlSocketType, rldot1pPriorityMapName=rldot1pPriorityMapName, stpLoopbackDetectionResolved=stpLoopbackDetectionResolved, rldot1dStpTaggedFlooding=rldot1dStpTaggedFlooding, rlEnvMonPowerSupplyStateChange=rlEnvMonPowerSupplyStateChange, rldot1wRStpVlanEdgePortVlan=rldot1wRStpVlanEdgePortVlan, rlDhcpApprovalWaitingGateway=rlDhcpApprovalWaitingGateway, rlMridEntry=rlMridEntry, rlMirVlanBaseLogicalPortsIfIndex=rlMirVlanBaseLogicalPortsIfIndex, rlSwPackageVersion=rlSwPackageVersion, rlPortCopySupport=rlPortCopySupport, rldot1xAuthMultiConfigEntry=rldot1xAuthMultiConfigEntry, rldot1sMstpPendingAction=rldot1sMstpPendingAction, rlMirVlanBaseReservedPortsTable=rlMirVlanBaseReservedPortsTable, rlTelnetSecondLoginBanner=rlTelnetSecondLoginBanner, rndIpHostManagementIfIndex=rndIpHostManagementIfIndex, framRelaySwitchConnectionUp=framRelaySwitchConnectionUp, rlSwPackageVersionVesrion=rlSwPackageVersionVesrion, rldot1xAuthMultiEapolReqFramesTx=rldot1xAuthMultiEapolReqFramesTx, rldot1dStpExtendedPortNumberFormat=rldot1dStpExtendedPortNumberFormat, rlAggregateVlanTable=rlAggregateVlanTable, rlLocalization=rlLocalization, rldot1sMstpInstancePortDesignatedCost=rldot1sMstpInstancePortDesignatedCost, vlanDynVlanRemoved=vlanDynVlanRemoved, rlPortGvrpStatisticsClear=rlPortGvrpStatisticsClear, rsDhcpAllocationFailure=rsDhcpAllocationFailure, rldot1sMstpInstancePortDesignatedRoot=rldot1sMstpInstancePortDesignatedRoot, rldot1sMstpPendingConfigurationName=rldot1sMstpPendingConfigurationName, rlAggregateVlanPhysAddressType=rlAggregateVlanPhysAddressType, rlCpuCounters=rlCpuCounters, rldot1sMstpExtPortTable=rldot1sMstpExtPortTable, rlSwPackageVersionTable=rlSwPackageVersionTable, rldot1pPriorityMapPriority=rldot1pPriorityMapPriority, 
rldot1sMstpDesignatedMaxHopes=rldot1sMstpDesignatedMaxHopes, rndImageInfoTable=rndImageInfoTable, rldot1wRStpVlanEdgePortPort=rldot1wRStpVlanEdgePortPort, rlGmrpVlanEntry=rlGmrpVlanEntry, rlSecuritySuiteMib=rlSecuritySuiteMib, rlIpxFftSubOverflow=rlIpxFftSubOverflow, rlMD5KeyRowStatus=rlMD5KeyRowStatus, rlGvrpMibVersion=rlGvrpMibVersion, vlanDynPortRemoved=vlanDynPortRemoved, rlBrgMacSwVersion=rlBrgMacSwVersion, rlMridConnection=rlMridConnection, rndIpHostManagementSupported=rndIpHostManagementSupported, rldot1xAuthenticationPortEntry=rldot1xAuthenticationPortEntry, rlStormCtrlUnknownUnicastEnable=rlStormCtrlUnknownUnicastEnable, rlBrgMcMngr=rlBrgMcMngr, rldot1sMStpInstancePortAdminPathCost=rldot1sMStpInstancePortAdminPathCost, rlRCliConfigPassword=rlRCliConfigPassword, rlPortGvrpStatisticsTable=rlPortGvrpStatisticsTable, rlDhcpClActionEntry=rlDhcpClActionEntry, rlJumboFramesStatusAfterReset=rlJumboFramesStatusAfterReset, rldot1dStpSeparatedBridgesEntry=rldot1dStpSeparatedBridgesEntry, rldot1dStpVlanPortEnable=rldot1dStpVlanPortEnable, rlManagedMrid=rlManagedMrid, rldot1dStpPortBpduGuardEnable=rldot1dStpPortBpduGuardEnable, rlTelnetMibVersion=rlTelnetMibVersion, rldot1xAuthMultiBackendNonNakResponsesFromSupplicant=rldot1xAuthMultiBackendNonNakResponsesFromSupplicant, rlOib=rlOib, rlWlanMIB=rlWlanMIB, rlStormCtrlEnbaleDependencyBetweenTypes=rlStormCtrlEnbaleDependencyBetweenTypes, rlFile=rlFile, rlDhcpApprovalEnabled=rlDhcpApprovalEnabled, rlStack=rlStack, rndDeviceParams=rndDeviceParams, rlStormCtrlBroadcastRate=rlStormCtrlBroadcastRate, rldot1dStpBpduType=rldot1dStpBpduType, rndNotifications=rndNotifications, rlIpMRouteStdMIB=rlIpMRouteStdMIB, rlAAA=rlAAA, rlBrgMacSwTrapEnable=rlBrgMacSwTrapEnable, rldot1dStpTypeAfterReset=rldot1dStpTypeAfterReset, rlPortGvrpErrorStatisticsTable=rlPortGvrpErrorStatisticsTable, rlDhcpApprovalWaitingTable=rlDhcpApprovalWaitingTable, rldot1dStpVlanPortForwardTransitions=rldot1dStpVlanPortForwardTransitions, 
rlMD5KeyStopAccept=rlMD5KeyStopAccept, rldot1x=rldot1x, endTftp=endTftp, rlCopy=rlCopy, VlanPriority=VlanPriority, rlPortGvrpTimersEntry=rlPortGvrpTimersEntry, rlMirCurMriNum=rlMirCurMriNum, rlAggregateSubVlanStatus=rlAggregateSubVlanStatus, rlStormCtrl=rlStormCtrl, rlDhcpClActionHostName=rlDhcpClActionHostName, dstrSysLog=dstrSysLog, rlPhy=rlPhy, rldot1sMstpInstanceId=rldot1sMstpInstanceId, rlPortGvrpStatisticsRLA=rlPortGvrpStatisticsRLA, rldot1dStpPortToDefault=rldot1dStpPortToDefault, rlMirVlanBaseLogicalPortsTable=rlMirVlanBaseLogicalPortsTable, rlPortGvrpApplicantStatusTable=rlPortGvrpApplicantStatusTable, rlMirVlanBaseLogicalPortsStatus=rlMirVlanBaseLogicalPortsStatus, rldot1sMstpInstancePortMstiId=rldot1sMstpInstancePortMstiId, rlIntel=rlIntel, rlArpSpoofingMibVersion=rlArpSpoofingMibVersion, rldot1dStpVlanPortPriority=rldot1dStpVlanPortPriority, rlBrgMulticast=rlBrgMulticast, rlTelnetSessionId=rlTelnetSessionId, rlMirInterfaceIfIndex=rlMirInterfaceIfIndex, rlStormCtrlSetDefaultBroadcastEnable=rlStormCtrlSetDefaultBroadcastEnable, rlTelnetTimeout=rlTelnetTimeout, rsSDinactiveServer=rsSDinactiveServer, rlMirInterfaceEntry=rlMirInterfaceEntry, rndActiveSoftwareFile=rndActiveSoftwareFile, pppSecurityViolation=pppSecurityViolation, genGroupConfigurationSymbol=genGroupConfigurationSymbol, rldot1xAuthMultiDiagSourceMac=rldot1xAuthMultiDiagSourceMac, rldot1dTpAgingTimeMax=rldot1dTpAgingTimeMax, rlTrunkPortRemovedTrap=rlTrunkPortRemovedTrap, rsPingCompletion=rsPingCompletion, rlMD5KeyChainName=rlMD5KeyChainName, rlCliMibVersion=rlCliMibVersion, rlDhcpClActionStatus=rlDhcpClActionStatus, framRelaySwitchConnectionDown=framRelaySwitchConnectionDown, rldot1sMstpInstancePortEnable=rldot1sMstpInstancePortEnable, rldot1dStpPortBpduGuardTable=rldot1dStpPortBpduGuardTable, rlArpSpoofingOutPhysIfIndex=rlArpSpoofingOutPhysIfIndex, rldot1sMstpInstanceEnable=rldot1sMstpInstanceEnable, rlMirVlanBaseReservedPortsIfIndex=rlMirVlanBaseReservedPortsIfIndex, 
rldot1sMstpMaxHopes=rldot1sMstpMaxHopes, rldot1xAuthMultiStatsEntry=rldot1xAuthMultiStatsEntry, rldot1xAuthMultiSessionStatsSourceMac=rldot1xAuthMultiSessionStatsSourceMac, rldot1wRStp=rldot1wRStp, rlStackStateChange=rlStackStateChange, rlJumboFrames=rlJumboFrames, rldot1dStpDesignatedRoot=rldot1dStpDesignatedRoot)
# Second generated exportSymbols() batch for this module (continuation of
# the symbol-export list above) -- generated output; do not hand-edit
# individual entries.
mibBuilder.exportSymbols("RADLAN-MIB", bpduGuardPortSuspended=bpduGuardPortSuspended, rndBootP=rndBootP, rndErrorDesc=rndErrorDesc, rndIpHostManagement=rndIpHostManagement, rlPortGvrpStatisticsSLE=rlPortGvrpStatisticsSLE, rndManagedDate=rndManagedDate, rldot1dStpPortStateForwarding=rldot1dStpPortStateForwarding, rlRs232=rlRs232, rldot1xAuthMultiAuthReauthsWhileAuthenticated=rldot1xAuthMultiAuthReauthsWhileAuthenticated, rlExperience=rlExperience, rldot1dStpPortDampEnable=rldot1dStpPortDampEnable, rlGmrp=rlGmrp, ipSpec=ipSpec, rldot1dStpForwardDelay=rldot1dStpForwardDelay, rlPolicyDropPacketTrap=rlPolicyDropPacketTrap, rlAggregateVlanMibVersion=rlAggregateVlanMibVersion, rldot1dPriority=rldot1dPriority, swIfTablePortLock=swIfTablePortLock, rlStartupCDBChanged=rlStartupCDBChanged, rsSDclientsTableOverflow=rsSDclientsTableOverflow, rlMultiSessionTerminal=rlMultiSessionTerminal, rndCommunityTrapDestPort=rndCommunityTrapDestPort, rldot1dStpHoldTime=rldot1dStpHoldTime, rndEndOfMibGroup=rndEndOfMibGroup, rlIpmFftOverflow=rlIpmFftOverflow, rlDhcpCl=rlDhcpCl, rlIpFftStnOverflow=rlIpFftStnOverflow, rlArpSpoofing=rlArpSpoofing, rldot1pPriorityMap=rldot1pPriorityMap, rndCommunityTable=rndCommunityTable, rlCli=rlCli, rndCommunityEntry=rndCommunityEntry, rldot1dPriorityPortGroupNumber=rldot1dPriorityPortGroupNumber, rldot1dStpVlanPortPort=rldot1dStpVlanPortPort, rldot1wRStpForceVersionEntry=rldot1wRStpForceVersionEntry, rlPortCopyVlanTaggingTable=rlPortCopyVlanTaggingTable, rndErrorSeverity=rndErrorSeverity, rldot1sMstpExtPortBoundary=rldot1sMstpExtPortBoundary, rldot1xAuthMultiSessionStatsTable=rldot1xAuthMultiSessionStatsTable, rldot1sMstpInstancePortPathCost=rldot1sMstpInstancePortPathCost, rlSocketEntry=rlSocketEntry, rlMirVlanBaseReservedPortsStatus=rlMirVlanBaseReservedPortsStatus, rlQosCliMib=rlQosCliMib, rldot1pPriorityMapTable=rldot1pPriorityMapTable, rndBridgeAlarm=rndBridgeAlarm, rlDhcpClCommandTable=rlDhcpClCommandTable, rndUnitNumber=rndUnitNumber, 
rlRmonControl=rlRmonControl, rldot1wRStpForceVersionTable=rldot1wRStpForceVersionTable, rlMacBasePrio=rlMacBasePrio, rldot1dStpPortsEnable=rldot1dStpPortsEnable, rlMD5KeyChainKeyRowStatus=rlMD5KeyChainKeyRowStatus, rldot1sMstpGroup=rldot1sMstpGroup, rldot1sMstpInstancePortForwardTransitions=rldot1sMstpInstancePortForwardTransitions, rldot1sMstpInstanceEntry=rldot1sMstpInstanceEntry, rlDhcpSpoofing=rlDhcpSpoofing, rlDhcpApprovalActionIfIndex=rlDhcpApprovalActionIfIndex, rlMD5KeyStopGenerate=rlMD5KeyStopGenerate, rlAggregateSubVlanEntry=rlAggregateSubVlanEntry, rlStormCtrlUnknownMulticastRate=rlStormCtrlUnknownMulticastRate, rlGmrpSupported=rlGmrpSupported, rlRCliMibVersion=rlRCliMibVersion, rlPhysicalDescription=rlPhysicalDescription, rlMirVlanBaseLogicalPortsEntry=rlMirVlanBaseLogicalPortsEntry, rldot1xAuthMultiAuthFailWhileAuthenticating=rldot1xAuthMultiAuthFailWhileAuthenticating, rldot1dStpSeparatedBridges=rldot1dStpSeparatedBridges, rlPortGvrpTimersTable=rlPortGvrpTimersTable, rlGvrpSupported=rlGvrpSupported, rldot1xUserBasedVlanSupported=rldot1xUserBasedVlanSupported, rldot1sMstpVlanEntry=rldot1sMstpVlanEntry, rldot1sMstpVlanTable=rldot1sMstpVlanTable, rlSocketTable=rlSocketTable, rldot1xAuthMultiEapLengthErrorFramesRx=rldot1xAuthMultiEapLengthErrorFramesRx, rldot1xUnAuthenticatedVlanTable=rldot1xUnAuthenticatedVlanTable, rldot1xBpduFilteringEnabled=rldot1xBpduFilteringEnabled, rlAggregateVlanArpProxy=rlAggregateVlanArpProxy, rlSsl=rlSsl, rldot1pPriorityMapStatus=rldot1pPriorityMapStatus, rldot1xAuthMultiDiagPortNumber=rldot1xAuthMultiDiagPortNumber, rldot1sMstpInstanceMaxAge=rldot1sMstpInstanceMaxAge, rldot1dStpMaxAge=rldot1dStpMaxAge, rldot1sMstpExtPortPort=rldot1sMstpExtPortPort, rlArpSpoofingStatus=rlArpSpoofingStatus, rlPortCopyVlanTaggingEntry=rlPortCopyVlanTaggingEntry, rlSocket=rlSocket, rlEnvMonTemperatureRisingAlarm=rlEnvMonTemperatureRisingAlarm, rlStormCtrlSetDefaultMulticastRate=rlStormCtrlSetDefaultMulticastRate, rlRCliTimer=rlRCliTimer, 
rlRadius=rlRadius, rlIgmpProxyTableOverflow=rlIgmpProxyTableOverflow, vlan=vlan, rldot1dStpBpduCount=rldot1dStpBpduCount, rlBgp=rlBgp, rndCommunityOwner=rndCommunityOwner, rndImageInfo=rndImageInfo, rndBasePhysicalAddress=rndBasePhysicalAddress, rldot1sMstpExtPortDesignatedRegionalRoot=rldot1sMstpExtPortDesignatedRegionalRoot, rlBrgMacSwRedBoundary=rlBrgMacSwRedBoundary, rlStormCtrlSupport=rlStormCtrlSupport, ipxSapTblOverflow=ipxSapTblOverflow, rldot1dStpVlanPortDesignatedPort=rldot1dStpVlanPortDesignatedPort, rlPoe=rlPoe, rlGvrp=rlGvrp, rlMD5Key=rlMD5Key, rlGmrpVlanTag=rlGmrpVlanTag, rldot1xAuthMultiDiagEntry=rldot1xAuthMultiDiagEntry, rlAutoUpdate=rlAutoUpdate, txOverflowHWFault=txOverflowHWFault, rlMirMaxNumOfMRIs=rlMirMaxNumOfMRIs, rsUDP=rsUDP, rndImage2Time=rndImage2Time, rldot1wRStpForceVersionVlan=rldot1wRStpForceVersionVlan, startTftp=startTftp, rldot1dStpSupportedType=rldot1dStpSupportedType, rldot1xAuthMultiBackendOtherRequestsToSupplicant=rldot1xAuthMultiBackendOtherRequestsToSupplicant, rldot1dStpPortMustBelongToVlan=rldot1dStpPortMustBelongToVlan, rndCommunityTrapsEnable=rndCommunityTrapsEnable, rldot1pPriorityMapPort=rldot1pPriorityMapPort, routeTableOverflow=routeTableOverflow, rldot1dExtBase=rldot1dExtBase, rlArpSpoofingMacAddr=rlArpSpoofingMacAddr, rldot1dStpVlanPortState=rldot1dStpVlanPortState, rldot1xAuthMultiBackendAuthSuccesses=rldot1xAuthMultiBackendAuthSuccesses, rlPortGvrpStatisticsRJIn=rlPortGvrpStatisticsRJIn, rldot1dStpPortRole=rldot1dStpPortRole, genGroupHWVersion=genGroupHWVersion, rlRCliUserPassword=rlRCliUserPassword, autoConfigurationCompleted=autoConfigurationCompleted, rlPortGvrpErrorStatisticsInvProt=rlPortGvrpErrorStatisticsInvProt, rldot1sMstp=rldot1sMstp, rndIpHost=rndIpHost, rlPortGmrpJoinTime=rlPortGmrpJoinTime, rlRCli=rlRCli, genGroup=genGroup, rlDhcpClActionIfIndex=rlDhcpClActionIfIndex, rsIpZhrNotAllocVirtualIp=rsIpZhrNotAllocVirtualIp, rlArpSpoofingIfIndex=rlArpSpoofingIfIndex, 
rndActiveSoftwareFileAfterReset=rndActiveSoftwareFileAfterReset, rldot1xAuthMultiAuthReauthsWhileAuthenticating=rldot1xAuthMultiAuthReauthsWhileAuthenticating, rlTelnetLoginBanner=rlTelnetLoginBanner, facsAccessVoilation=facsAccessVoilation, rldot1sMstpInstanceRootCost=rldot1sMstpInstanceRootCost, rndBrgFeatures=rndBrgFeatures, rlBrgMacSwYellowBoundary=rlBrgMacSwYellowBoundary, rndApplications=rndApplications, rldot1xAuthMultiSessionStatsPortNumber=rldot1xAuthMultiSessionStatsPortNumber, rldot1dPriorityMibVersion=rldot1dPriorityMibVersion, rlMD5KeyTable=rlMD5KeyTable, rlMirVlanBaseLogicalPortsVlanTag=rlMirVlanBaseLogicalPortsVlanTag, rldot1xAuthMultiSourceMac=rldot1xAuthMultiSourceMac, rlPortGvrpErrorStatisticsInvAlen=rlPortGvrpErrorStatisticsInvAlen, rlPortGvrpStatisticsSLA=rlPortGvrpStatisticsSLA, rlDigitalKeyManage=rlDigitalKeyManage, rldot1dStpPortStateNotForwarding=rldot1dStpPortStateNotForwarding, rlAggregateVlanIndex=rlAggregateVlanIndex, rlStormCtrlUnknownMulticastEnable=rlStormCtrlUnknownMulticastEnable, rlSsh=rlSsh, rldot1xAuthMultiBackendAccessChallenges=rldot1xAuthMultiBackendAccessChallenges, rldot1dStpType=rldot1dStpType, rldot1dStpPortDampStable=rldot1dStpPortDampStable, rlpBridgeMIBObjects=rlpBridgeMIBObjects, rldot1xExtAuthSessionStatsEntry=rldot1xExtAuthSessionStatsEntry, rnd=rnd, rlPortGvrpErrorStatisticsInvEvent=rlPortGvrpErrorStatisticsInvEvent, rlGalileo=rlGalileo, rldot1xAuthMultiStatsTable=rldot1xAuthMultiStatsTable, rlMD5KeyStartGenerate=rlMD5KeyStartGenerate, rldot1dStpPortFilterBpdu=rldot1dStpPortFilterBpdu, rldot1wRStpForceVersionState=rldot1wRStpForceVersionState, rldot1xAuthMultiSessionFramesTx=rldot1xAuthMultiSessionFramesTx, rlPortGvrpStatisticsRLIn=rlPortGvrpStatisticsRLIn, rlGmrpVlanTable=rlGmrpVlanTable, rldot1sMstpInstanceTimeSinceTopologyChange=rldot1sMstpInstanceTimeSinceTopologyChange, rlMirMaxNumOfMRIsAfterReset=rlMirMaxNumOfMRIsAfterReset, rldot1pPriorityMapEntry=rldot1pPriorityMapEntry, swInterfaces=swInterfaces, 
rlPortGvrpStatisticsEntry=rlPortGvrpStatisticsEntry, rldot1xAuthMultiEapolReqIdFramesTx=rldot1xAuthMultiEapolReqIdFramesTx, rldot1xAuthMultiEntersAuthenticating=rldot1xAuthMultiEntersAuthenticating, rndBaseBootVersion=rndBaseBootVersion, rlStormCtrlSetDefaultMulticastEnable=rlStormCtrlSetDefaultMulticastEnable, rldot1pPriorityMapState=rldot1pPriorityMapState, rlDhcpApprovalActionAddress=rlDhcpApprovalActionAddress, rsIpZhrConnectionsTableOverflow=rsIpZhrConnectionsTableOverflow, rlStormCtrlMibVersion=rlStormCtrlMibVersion, rlSwPackageVersionEntry=rlSwPackageVersionEntry, rlDhcpApprovalWaitingAddress=rlDhcpApprovalWaitingAddress, rndCommunityMngStationAddr=rndCommunityMngStationAddr, rndImage1Name=rndImage1Name, rsIpZhrReqStaticConnNotAccepted=rsIpZhrReqStaticConnNotAccepted, rndBrgVersion=rndBrgVersion, rlAggregateVlan=rlAggregateVlan, rsWSDRedundancySwitch=rsWSDRedundancySwitch, rldot1xPortStatusAuthorizedTrap=rldot1xPortStatusAuthorizedTrap, rlEnv=rlEnv, rndImageSize=rndImageSize, errorsDuringInit=errorsDuringInit, rldot1xAuthMultiSessionTime=rldot1xAuthMultiSessionTime, rlPortGvrpErrorStatisticsInvAval=rlPortGvrpErrorStatisticsInvAval, rldot1sMstpInstanceRemainingHopes=rldot1sMstpInstanceRemainingHopes, rndMng=rndMng, rlNicRedundancy=rlNicRedundancy, rlMridTable=rlMridTable, rlDhcpClCommandEntry=rlDhcpClCommandEntry, rlAggregateSubVlanIfIndex=rlAggregateSubVlanIfIndex, rlMD5KeyId=rlMD5KeyId, rldot1xAuthMultiAuthEapStartsWhileAuthenticating=rldot1xAuthMultiAuthEapStartsWhileAuthenticating, rlBrgMacSwOperTrapCount=rlBrgMacSwOperTrapCount, rldot1dTpAgingTimeMin=rldot1dTpAgingTimeMin, rlMacMulticast=rlMacMulticast, rldot1dStpTrapVrblVID=rldot1dStpTrapVrblVID, rldot1dStpPortRestrictedRole=rldot1dStpPortRestrictedRole, rldot1sMstpExtPortDesignatedRegionalCost=rldot1sMstpExtPortDesignatedRegionalCost, rlBonjour=rlBonjour, rlSwPackageVersionName=rlSwPackageVersionName, rsTunning=rsTunning, rlMD5KeyChainKeyId=rlMD5KeyChainKeyId, 
rlDhcpApprovalWaitingEntry=rlDhcpApprovalWaitingEntry, rlGlobalIpAddrTable=rlGlobalIpAddrTable, rsIpZhrVirtualIpAsSource=rsIpZhrVirtualIpAsSource, rlMirMibVersion=rlMirMibVersion, rldot1dStpLastChanged=rldot1dStpLastChanged, rlPortGvrpLeaveAllTime=rlPortGvrpLeaveAllTime, rldot1dStpTrapVrblifIndex=rldot1dStpTrapVrblifIndex, swIfTablePortUnLock=swIfTablePortUnLock, rlMngInf=rlMngInf, rldot1xAuthMultiEapolRespIdFramesRx=rldot1xAuthMultiEapolRespIdFramesRx, papFailedCommunication=papFailedCommunication, rndImage1Date=rndImage1Date, rlAggregateVlanStatus=rlAggregateVlanStatus, Percents=Percents, rlTrunkPortAddedTrap=rlTrunkPortAddedTrap, rldot1xSupplicantMacUnauthorizedTrap=rldot1xSupplicantMacUnauthorizedTrap, rldot1sMstpConfigurationName=rldot1sMstpConfigurationName, rldot1dStpRootPort=rldot1dStpRootPort, rndStackUnitNumber=rndStackUnitNumber, rlDhcpApprovalWaitingIfIndex=rlDhcpApprovalWaitingIfIndex, rlMirVlanBaseLogicalPortsReservedIfIndex=rlMirVlanBaseLogicalPortsReservedIfIndex, rlPortGvrpApplicantStatusValue=rlPortGvrpApplicantStatusValue, chapFailedCommunication=chapFailedCommunication, rlMD5KeyChainTable=rlMD5KeyChainTable, rlBrgMacAddFailedTrap=rlBrgMacAddFailedTrap, rldot1dStpVlanPortVlan=rldot1dStpVlanPortVlan, rldot1xUnAuthenticatedVlanEntry=rldot1xUnAuthenticatedVlanEntry, genGroupHWStatus=genGroupHWStatus, rndImage1Time=rndImage1Time, rlTerminalDebugModePassword=rlTerminalDebugModePassword, rlPortGvrpJoinTime=rlPortGvrpJoinTime, rlStormCtrlGroupBroadcastId=rlStormCtrlGroupBroadcastId, rlBridgeSecurity=rlBridgeSecurity, rndImage2Name=rndImage2Name, rldot1sMstpInstanceDesignatedRoot=rldot1sMstpInstanceDesignatedRoot, rlRCliEnablePassword=rlRCliEnablePassword, rlSocketMibVersion=rlSocketMibVersion)
mibBuilder.exportSymbols("RADLAN-MIB", rlIPmulticast=rlIPmulticast, rlPortGvrpErrorStatisticsClear=rlPortGvrpErrorStatisticsClear, rldot1dStpRootCost=rldot1dStpRootCost, rldot1xExtAuthSessionStatsTable=rldot1xExtAuthSessionStatsTable, rlTraceRoute=rlTraceRoute, rlRcliFileAction=rlRcliFileAction, rlPortGvrpRegistrationModeForbidden=rlPortGvrpRegistrationModeForbidden, rldot1dStpVlanPortDesignatedCost=rldot1dStpVlanPortDesignatedCost, rldot1xAuthMultiAuthEapStartsWhileAuthenticated=rldot1xAuthMultiAuthEapStartsWhileAuthenticated, rndInactiveArpTimeOut=rndInactiveArpTimeOut, rlStormCtrlSetDefaultUnknownUnicastRate=rlStormCtrlSetDefaultUnknownUnicastRate, rldot1pPriorityMapPortList=rldot1pPriorityMapPortList, rlWlanAccessPoint=rlWlanAccessPoint, rlPortGvrpStatisticsSLIn=rlPortGvrpStatisticsSLIn, rldot1dStpSeparatedBridgesPortEnable=rldot1dStpSeparatedBridgesPortEnable, rldot1sMStpInstancePortRole=rldot1sMStpInstancePortRole, rlAgentsCapabilitiesGroups=rlAgentsCapabilitiesGroups, rlSocketState=rlSocketState, rlIpFftSubOverflow=rlIpFftSubOverflow, rldot1dStpPortBelongToVlanDefault=rldot1dStpPortBelongToVlanDefault, rldot1dStpVlan=rldot1dStpVlan, rlStormCtrlGroupMulticastId=rlStormCtrlGroupMulticastId, rldot1sMstpInstancePortEntry=rldot1sMstpInstancePortEntry, rndActiveSoftwareFileTable=rndActiveSoftwareFileTable, rlBrgMacSwKeyType=rlBrgMacSwKeyType, rlCliTimer=rlCliTimer, rsSnmpSetRequestInSpecialCfgState=rsSnmpSetRequestInSpecialCfgState, vlanDynVlanAdded=vlanDynVlanAdded, resetRequired=resetRequired, rldot1sMstpInstanceHoldTime=rldot1sMstpInstanceHoldTime, rlStormCtrlGroupUnknownUnicastId=rlStormCtrlGroupUnknownUnicastId, rldot1sMstpInstanceTable=rldot1sMstpInstanceTable, rldot1xAuthMultiStatsPortNumber=rldot1xAuthMultiStatsPortNumber, rlDhcpApprovalActionEntry=rlDhcpApprovalActionEntry, rldot1sMstpInstanceHelloTime=rldot1sMstpInstanceHelloTime, rlPortGmrpTimersEntry=rlPortGmrpTimersEntry, dlPrivate=dlPrivate, 
rldot1dPriorityPortGroupEntry=rldot1dPriorityPortGroupEntry, rndCommunityAccess=rndCommunityAccess, rldot1xAuthMultiEapolStartFramesRx=rldot1xAuthMultiEapolStartFramesRx, rldot1dDeviceCapabilities=rldot1dDeviceCapabilities, rlVrrpEntriesDeleted=rlVrrpEntriesDeleted, rldot1dStpVlanPortEntry=rldot1dStpVlanPortEntry, ipxRipTblOverflow=ipxRipTblOverflow, frDLCIStatudChange=frDLCIStatudChange, rndImage2Version=rndImage2Version, rlPortGvrpErrorStatisticsEntry=rlPortGvrpErrorStatisticsEntry, rldot1dStpFloodBpduMethod=rldot1dStpFloodBpduMethod, rldot1sMstpInstanceRootPort=rldot1sMstpInstanceRootPort, rlPortGmrpTimersTable=rlPortGmrpTimersTable, rlSNMP=rlSNMP, rldot1sMstpInstanceTopChanges=rldot1sMstpInstanceTopChanges, rlFFT=rlFFT, NetNumber=NetNumber, rlStormCtrlTypeSupport=rlStormCtrlTypeSupport, rlBrgMacSwDynamicTables=rlBrgMacSwDynamicTables, rlStormCtrlSetDefaultBroadcastRate=rlStormCtrlSetDefaultBroadcastRate, rlPortGvrpRegistrationModeTable=rlPortGvrpRegistrationModeTable, rndActiveSoftwareFileEntry=rndActiveSoftwareFileEntry, stpLoopbackDetection=stpLoopbackDetection, rlStormCtrlRateUnitTypeSupport=rlStormCtrlRateUnitTypeSupport, rxOverflowHWFault=rxOverflowHWFault, rlBrgMacSwMaxTableNumber=rlBrgMacSwMaxTableNumber, forwardingTabOverflow=forwardingTabOverflow, rlTelnetSessionLoginTime=rlTelnetSessionLoginTime, rlAAAEap=rlAAAEap, rldot1dStpTrapVariable=rldot1dStpTrapVariable, rndCommunityPortSecurity=rndCommunityPortSecurity, rlBrgMacSwitch=rlBrgMacSwitch, rlStormCtrlMulticastRate=rlStormCtrlMulticastRate, rlArpSpoofingTable=rlArpSpoofingTable, rlPortGvrpStatisticsREmp=rlPortGvrpStatisticsREmp, rlHotSwapTrap=rlHotSwapTrap, rlStormCtrlEntry=rlStormCtrlEntry, rldot1xSupplicantMacAuthorizedTrap=rldot1xSupplicantMacAuthorizedTrap, rldot1sMstpExtPortInternalOperPathCost=rldot1sMstpExtPortInternalOperPathCost, rlCliPassword=rlCliPassword, rlPortGvrpStatisticsSJIn=rlPortGvrpStatisticsSJIn, rlAmap=rlAmap, rlArpSpoofingRemoteIpAddr=rlArpSpoofingRemoteIpAddr, 
rldot1sMstpPendingGroup=rldot1sMstpPendingGroup, rndBrgLicense=rndBrgLicense, rldot1xPortStatusUnauthorizedTrap=rldot1xPortStatusUnauthorizedTrap, rndCommunityString=rndCommunityString, rldot1dStpPortBpduReceived=rldot1dStpPortBpduReceived, rlDhcpApprovalActionMask=rlDhcpApprovalActionMask, rlSocketId=rlSocketId, rlGmrpVlanEnable=rlGmrpVlanEnable, rlPortGvrpOverrideGarp=rlPortGvrpOverrideGarp, vlanDynamicToStatic=vlanDynamicToStatic, rldot1dStpSeparatedBridgesTable=rldot1dStpSeparatedBridgesTable, rldot1sMstpInstancePriority=rldot1sMstpInstancePriority, rldot1xAuthMultiBackendResponses=rldot1xAuthMultiBackendResponses, rldot1xAuthMultiEntersConnecting=rldot1xAuthMultiEntersConnecting, rlSocketBlockMode=rlSocketBlockMode, rldot1xAuthMultiBackendAuthState=rldot1xAuthMultiBackendAuthState, rldot1dStpPortTable=rldot1dStpPortTable, rldot1dStpEnableByDefault=rldot1dStpEnableByDefault, rlPortGvrpStatisticsRJE=rlPortGvrpStatisticsRJE, rlMD5KeyEntry=rlMD5KeyEntry, rldot1dStpMibVersion=rldot1dStpMibVersion, RlStormCtrlRateUnit=RlStormCtrlRateUnit, rldot1dStpEnable=rldot1dStpEnable, rlMD5KeyStartAccept=rlMD5KeyStartAccept, rl3sw2swTablesPollingInterval=rl3sw2swTablesPollingInterval, rlStormCtrlSetDefaultRateType=rlStormCtrlSetDefaultRateType, rlCliFileEnableAfterReset=rlCliFileEnableAfterReset, rlArpSpoofingLocalIpAddr=rlArpSpoofingLocalIpAddr, rlStormCtrlRateDependencyBetweenTypes=rlStormCtrlRateDependencyBetweenTypes, rlStormCtrlMulticastEnable=rlStormCtrlMulticastEnable, rldot1xGuestVlanSupported=rldot1xGuestVlanSupported, rldot1xUnAuthenticatedVlanSupported=rldot1xUnAuthenticatedVlanSupported, rldot1sMstpInstancePortPriority=rldot1sMstpInstancePortPriority, rlMirInterfaceMrid=rlMirInterfaceMrid, rlStormCtrlRateType=rlStormCtrlRateType, rldot1dStpTopChanges=rldot1dStpTopChanges, rldot1dStpVlanEntry=rldot1dStpVlanEntry, rldot1dPriorityPortGroupTable=rldot1dPriorityPortGroupTable, rlBrgMacSwOldEntryDeleteMode=rlBrgMacSwOldEntryDeleteMode, 
rlTelnetUsersEntry=rlTelnetUsersEntry, rldot1xAuthMultiEapolLogoffFramesRx=rldot1xAuthMultiEapolLogoffFramesRx, vlanDynPortAdded=vlanDynPortAdded, rldot1xAuthMultiEapolFramesRx=rldot1xAuthMultiEapolFramesRx, rldot1xAuthMultiSessionStatsEntry=rldot1xAuthMultiSessionStatsEntry, rlPortGvrpLeaveTime=rlPortGvrpLeaveTime, rldot1dStp=rldot1dStp, rlJumboFramesCurrentStatus=rlJumboFramesCurrentStatus, rldot1xAuthMultiSessionId=rldot1xAuthMultiSessionId, rlPortGmrpLeaveTime=rlPortGmrpLeaveTime, rldot1xUserBasedVlanPorts=rldot1xUserBasedVlanPorts, rldot1sMstpInstanceForwardDelay=rldot1sMstpInstanceForwardDelay, rndBootPServerAddress=rndBootPServerAddress, rlBroadcom=rlBroadcom, rlAggregateSubVlanTable=rlAggregateSubVlanTable, rldot1xGuestVlanPorts=rldot1xGuestVlanPorts, rldot1wRStpEdgePortStatus=rldot1wRStpEdgePortStatus, rlPortCopyVlanTagging=rlPortCopyVlanTagging, rldot1dStpEdgeportSupportInStp=rldot1dStpEdgeportSupportInStp, rlStormCtrlUnknownUnicastRate=rlStormCtrlUnknownUnicastRate, rlDot1xAuthSessionAuthenticMethod=rlDot1xAuthSessionAuthenticMethod, rldot1dStpVlanTable=rldot1dStpVlanTable, rldot1xGuestVlanVID=rldot1xGuestVlanVID, rlTelnetPassword=rlTelnetPassword, rlDhcpApprovalActionTable=rlDhcpApprovalActionTable, rldot1dStpVlanEnable=rldot1dStpVlanEnable, rlSmon=rlSmon)
|
from aws_cdk import (
aws_lambda as lambda_,
aws_s3 as s3,
App, Duration, Stack
)
# Creates reference to already existing s3 bucket and lambda code
class LambdaS3Code(Stack):
    """Stack that wires a Lambda function to code stored in an existing S3 bucket."""

    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)
        # Reference (not create) the pre-existing bucket that holds the code.
        lambda_code_bucket = s3.Bucket.from_bucket_attributes(
            self, 'LambdaCodeBucket',
            bucket_name='my-lambda-code-bucket'
        )
        # NOTE(review): Lambda code in S3 is normally a .zip archive —
        # confirm 'my-lambda.py' is the intended object key.
        lambda_fn = lambda_.Function(
            self, 'Singleton',
            handler='index.main',
            code=lambda_.S3Code(bucket=lambda_code_bucket, key='my-lambda.py'),
            runtime=lambda_.Runtime.PYTHON_3_7,
            timeout=Duration.seconds(300)
        )
# Instantiate the CDK app, register the stack, and emit the CloudFormation template.
app = App()
LambdaS3Code(app, "LambdaS3CodeExample")
app.synth()
|
import json, re
def addAlarm(textToSpeech, phrases, text):
    """Parse an "... at <time> <AM/PM>" command, announce it, and persist the alarm.

    Args:
        textToSpeech: callable invoked with the sentence to speak.
        phrases: unused; kept for the command-handler interface.
        text: list of recognised words making up the command.

    Side effects: appends {"time": "H:59 AM/PM"} to the "alarms" list in data.json.
    """
    # " ".join replaces the original quadratic concat loop (same result).
    sentence = " ".join(text)
    sentence = sentence.replace("a.m.", "AM").replace("p.m.", "PM")
    # Everything after the first " at " is the spoken time, e.g. "7 AM".
    alarm_time = sentence.split(" at ")[1].upper().replace("PM ", "PM").replace("AM ", "AM")
    textToSpeech("I have added a new alarm at " + alarm_time)
    # Store as "H:59 AM/PM" (alarm fires on the :59 of the previous hour).
    if "AM" in alarm_time:
        alarm_time = alarm_time.replace(" AM", "") + ":59 AM"
    else:
        alarm_time = alarm_time.replace(" PM", "") + ":59 PM"
    # with-blocks replace the original manual open/read/close sequence.
    with open("data.json") as f:
        data = json.load(f)
    # "sound" field omitted for now — custom alarm sounds not yet supported.
    data["alarms"].append({"time": alarm_time})
    with open("data.json", "w") as f:
        json.dump(data, f)
def removeAlarm(textToSpeech, phrases, text, server):
    """Parse an "... at <time> <AM/PM>" command, announce it, and delete the alarm.

    Args:
        textToSpeech: callable invoked with the sentence to speak.
        phrases, server: unused; kept for the command-handler interface.
        text: list of recognised words making up the command.

    Fixes the original, which opened data.json read-only and then called
    write() on that same handle (io.UnsupportedOperation) — so no alarm was
    ever actually removed.
    """
    sentence = " ".join(text)
    sentence = sentence.replace("a.m.", "AM").replace("p.m.", "PM")
    alarm_time = sentence.split(" at ")[1]
    textToSpeech("I have removed the alarm at " + alarm_time)
    # Match the "H:59 AM/PM" storage format used by addAlarm.
    if "AM" in alarm_time:
        alarm_time = alarm_time.replace(" AM", "") + ":59 AM"
    else:
        alarm_time = alarm_time.replace(" PM", "") + ":59 PM"
    with open("data.json") as f:
        data = json.load(f)
    # Drop matching alarms instead of nulling entries in place.
    data["alarms"] = [a for a in data["alarms"] if a["time"] != alarm_time]
    with open("data.json", "w") as f:
        json.dump(data, f)
#Legacy Grammar Learner test: fill in the ULL Project Plan "Parses" spreadsheet
#language-learning/src/grammar_learner/pqa_table.py 80725, renamed pqa05 #80802
import os
import sys
import time
from ..common.cliutils import handle_path_string
from ..grammar_tester.grammartester import test_grammar
from ..grammar_tester.optconst import * # import * only allowed at module level
from .poc05 import learn_grammar, params
def pqa_meter(input_path, output_grammar, corpus_path, reference_path, runs=(1,1), **kwargs):
    """Score a learned grammar with the grammar tester.

    Wraps grammar_tester.test_grammar (the "test_grammar_wrapped 2.0" of
    changeset 80720) and returns its (PA, PQ, PQA) triple.

    Args:
        input_path: learned dictionary path (passed as dict_path).
        output_grammar: grammar dir, used both as output and grammar path.
        corpus_path, reference_path: corpus and gold-parses locations.
        runs: unused here; kept for interface compatibility with callers.
        **kwargs: must contain 'linkage_limit' (e.g. 100).
    """
    template_path = handle_path_string("tests/test-data/dict/poc-turtle")
    linkage_limit = kwargs['linkage_limit']
    # Same flag set as before — BIT_ULL_IN added (80719) for the Child
    # Directed Speech tables; BIT_CAPS would preserve capitalisation.
    options = (BIT_SEP_STAT | BIT_LG_EXE | BIT_NO_LWALL | BIT_NO_PERIOD
               | BIT_STRIP | BIT_RM_DIR | BIT_DPATH_CREATE | BIT_LOC_LANG
               | BIT_PARSE_QUALITY | BIT_ULL_IN)
    return test_grammar(corpus_path, output_grammar, input_path,
                        output_grammar, template_path, linkage_limit,
                        options, reference_path)
def table_damb(lines, out_dir, cps=(0,0), rps=(0,0), runs=(1,1), **kwargs): #-lines, module_path,
    """Learn and score grammars for the Amb/disAmb POC-English test-plan rows.

    Args:
        lines: test-plan rows [id, corpus, dataset, left_wall, period, gen].
        out_dir: output directory for learned categories/grammars.
        cps, rps: (Amb, disAmb) corpus and reference paths; 0 selects the
            built-in default gold-parses path.
        runs: (learner runs, tester runs per learned grammar).
        **kwargs: learner options; mutated in place (left_wall, period,
            rules_generalization, categories_generalization).

    Returns:
        (average, details): averaged rows and one row per tester run.
    """
    #80720: table_amb 2.0: module_path, corpus_path, test_path built-in
    # cps,rps: tuples len=2 corpus_paths, reference_paths for Amb and disAmb corpora
    module_path = os.path.abspath(os.path.join('..'))
    # if module_path not in sys.path: sys.path.append(module_path)
    rpd = module_path + '/data/POC-English-Amb/MST-fixed-manually/poc-english_ex-parses-gold.txt'
    # Encode the parameter-space signature, e.g. 'cILEd'.
    spaces = ''
    if kwargs['context'] == 1:
        spaces += 'c'
    else: spaces += 'd'
    if kwargs['word_space'] == 'vectors':
        spaces += 'DRK'
    else: spaces += 'ILE'
    if kwargs['grammar_rules'] == 1:
        spaces += 'c'
    else: spaces += 'd'
    details = []
    average = []
    for i,line in enumerate(lines):
        corpus = line[1]
        # Pick corpus/reference paths by corpus flavour; 0 means "use default".
        if corpus == 'POC-English-disAmb':
            if cps[1] == 0:
                cp = rpd # default reference_path
            else: cp = cps[1]
            if rps[1] == 0:
                rp = rpd
            else: rp = rps[1]
        else:
            if cps[0] == 0:
                cp = rpd
            else: cp = cps[0]
            if rps[0] == 0:
                rp = rpd
            else: rp = rps[0]
        dataset = line[2]
        if line[3] != 0:
            kwargs['left_wall'] = 'LEFT-WALL'
            lw = 'LW'
        else:
            kwargs['left_wall'] = ''
            lw = ' -- '
        if line[4] != 0:
            kwargs['period'] = True
            dot = ' + '
        else:
            kwargs['period'] = False
            dot = ' -- '
        gen = line[5] # none | rules | categories | both
        if gen in ['rules','both']:
            kwargs['rules_generalization'] = 'jaccard'
        else: kwargs['rules_generalization'] = 'off'
        if gen in ['categories','both']:
            kwargs['categories_generalization'] = 'jaccard'
        else: kwargs['categories_generalization'] = 'off'
        # Connector-based rules (grammar_rules == 1) are not generalized.
        if kwargs['grammar_rules'] == 1 and gen not in ['none','off']: continue
        ip, oc, og = params(corpus, dataset, module_path, out_dir, **kwargs)
        # ip,oc,og :: input_path, output_categories, output_grammar
        pa = []
        pq = []
        rules = []
        for j in range(runs[0]):
            # NOTE(review): 're' here is the learner result dict, not the regex
            # module; the inner loop variable shadows the outer 'i'.
            re = learn_grammar(ip, oc, og, **kwargs)
            for i in range(runs[1]):
                a, q, qa = pqa_meter(re['grammar_file'], og, cp, rp, **kwargs)
                pa.append(a)
                pq.append(q)
                rules.append(re['grammar_rules'])
                rulestr = ' ' + str(re['grammar_rules']) + ' '
                # 'line' is rebound to the result row (line[0] is preserved).
                line = [line[0], corpus, dataset, lw, dot, gen, spaces, rulestr, \
                    str(int(round(a,0)))+'%', str(int(round(q,0)))+'%']
                details.append(line)
        # NOTE(review): runs=(0, n) or (n, 0) would divide by zero here.
        paa = int(round(sum(pa)/len(pa), 0))
        pqa = int(round(sum(pq)/len(pq), 0))
        rules_avg = int(round(sum(rules)/len(rules), 0))
        avg = [line[0], corpus, dataset, lw, dot, gen, spaces, rules_avg, \
            str(paa)+'%', str(pqa)+'%']
        average.append(avg)
    return average, details
def table_cds(lines, out_dir, cp, rp, runs=(1,1), **kwargs):
    """Learn and score grammars for each test-plan row (Child Directed Speech).

    Args:
        lines: test-plan rows [id, corpus, dataset, left_wall, period, gen].
        out_dir: output directory for learned categories/grammars.
        cp: corpus path for the grammar tester.
        rp: reference (gold parses) path.
        runs: (learner runs, tester runs per learned grammar).
        **kwargs: learner options; mutated in place (left_wall, period,
            rules_generalization, categories_generalization).

    Returns:
        (average, details): averaged rows and one row per tester run.
    """
    module_path = os.path.abspath(os.path.join('..'))
    # Encode the parameter-space signature, e.g. 'cILEd'.
    spaces = ''
    spaces += 'c' if kwargs['context'] == 1 else 'd'
    spaces += 'DRK' if kwargs['word_space'] == 'vectors' else 'ILE'
    spaces += 'c' if kwargs['grammar_rules'] == 1 else 'd'
    details = []
    average = []
    for i, line in enumerate(lines):
        corpus = line[1]
        dataset = line[2]
        if line[3] != 0:
            kwargs['left_wall'] = 'LEFT-WALL'
            lw = 'LW'
        else:
            kwargs['left_wall'] = ''
            lw = ' -- '
        if line[4] != 0:
            kwargs['period'] = True
            dot = ' + '
        else:
            kwargs['period'] = False
            dot = ' -- '
        gen = line[5]  # none | rules | categories | both
        kwargs['rules_generalization'] = 'jaccard' if gen in ['rules', 'both'] else 'off'
        kwargs['categories_generalization'] = 'jaccard' if gen in ['categories', 'both'] else 'off'
        # Connector-based rules (grammar_rules == 1) are not generalized.
        if kwargs['grammar_rules'] == 1 and gen != 'none':
            continue
        ip, oc, og = params(corpus, dataset, module_path, out_dir, **kwargs)
        pa = []
        pq = []
        rules = []
        for j in range(runs[0]):
            try:
                # 'learned' (was 're', shadowing the stdlib regex module name)
                # is the learner result dict.
                learned = learn_grammar(ip, oc, og, **kwargs)
                for k in range(runs[1]):  # was 'i', shadowing the outer loop var
                    a, q, qa = pqa_meter(learned['grammar_file'], og, cp, rp, **kwargs)
                    pa.append(a)
                    pq.append(q)
                    rules.append(learned['grammar_rules'])
                    rulestr = ' ' + str(learned['grammar_rules']) + ' '
                    details.append([line[0], corpus, dataset, lw, dot, gen, spaces,
                                    rulestr, str(int(round(a, 0))) + '%',
                                    str(int(round(q, 0))) + '%'])
            except Exception as e:
                # Was a bare `except:` that swallowed every error (including
                # KeyboardInterrupt) silently; record the failure but report it.
                print('learn_grammar/pqa_meter failed:', e)
                pa.append(0)
                pq.append(0)
                rules.append(0)
                details.append([line[0], corpus, dataset, lw, dot, gen, spaces,
                                ' fail ', '---', '---'])
        if len(pa) > 0:
            paa = int(round(sum(pa) / len(pa), 0))
            pqa = int(round(sum(pq) / len(pq), 0))
            # Failed runs contribute 0 rules; average only the successful ones.
            non_zero_rules = [x for x in rules if x > 0]
            if len(non_zero_rules) > 0:
                average_rules_n = int(round(sum(non_zero_rules) / len(non_zero_rules), 0))
            else:
                average_rules_n = 0
            average.append([line[0], corpus, dataset, lw, dot, gen, spaces,
                            average_rules_n, str(paa) + '%', str(pqa) + '%'])
    return average, details
#80802 poc05.py restructured. pqa_table.py ⇒ further dev,
#this legacy pqa_table.py renamed pqa05 to compare dev with poc05 baseline
#FIXME:DEL with poc05 POC.0.5 after major dev
|
import tkinter as tk
# Event handler function
def doorbell(event):
    """Handle a left-click on the Doorbell button by printing a message."""
    print("You rang the doorbell!!")
# Build a small demo window: two labels, two buttons, one click handler.
window = tk.Tk()
window.geometry("300x200")
# Labels default to the implicit root window when no master is given.
alabel = tk.Label(text="Banana")
alabel.grid(column=0, row=0)
blabel = tk.Label(text="Apple")
blabel.grid(column=1, row=0)
button = tk.Button(window, text="Doorbell")
# NOTE(review): no row given — Tk auto-places this in the next free row.
button.grid(column=0)
button2 = tk.Button(window, text="10")
button2.grid(column=1, row=1)
# Left mouse button on the Doorbell button triggers the handler.
button.bind("<Button-1>", doorbell)
window.mainloop()
|
from operator import itemgetter
from datetime import date
import calendar
def print_header(name, account_no, balance):
    """Print the account banner: holder name, account number, opening balance."""
    print(f"\nname: {name}  account: {account_no}  original balance: ${balance}")
def get_date(date_string):
    """Convert a 'YYYYMMDD...' string to a datetime.date (extra chars ignored)."""
    year, month, day = (int(date_string[:4]),
                        int(date_string[4:6]),
                        int(date_string[6:8]))
    return date(year, month, day)
def read_charges(charges_file):
    """Read comma-separated charge records from *charges_file*.

    Each line is "vendor,YYYYMMDD,amount". Returns (charges, charges_dict):
    a flat list of charge dicts (keys: vendor, date, charge) and the same
    dicts grouped by vendor.

    The file handle is now closed deterministically via a with-block
    (the original iterated an unassigned open() and leaked the handle).
    """
    charges = []
    charges_by_vendor = {}
    with open(charges_file) as f:
        for raw_line in f:
            fields = raw_line.strip().split(',')
            charge_info = {
                'vendor': fields[0],
                'date': fields[1],
                'charge': fields[2],
            }
            charges.append(charge_info)
            # setdefault replaces the manual "if vendor not in dict" dance.
            charges_by_vendor.setdefault(charge_info['vendor'], []).append(charge_info)
    return charges, charges_by_vendor
def print_charges_from_list(charges_list, balance):
    """Print a formatted ledger of charges, tracking the running balance."""
    print("Vendor Year Month Day Charge Balance")
    print("----------------------- ---- --------- --- ------- --------")
    row_format = "{:24} {:4} {:9} {:4} {:8,.2f} {:8,.2f}"
    for entry in charges_list:
        amount = float(entry['charge'])
        balance -= amount
        when = get_date(entry['date'])
        print(row_format.format(entry['vendor'], when.year,
                                calendar.month_name[when.month],
                                when.day, amount, balance))
#-----------------------------------------------------------------------------
# Initialize and print account information
bal = 1000.00
name = "Chuck Black"
acct_no = "01123581321"
print_header(name, acct_no, bal)
# Read charges
charges_list, charges_dict = read_charges("m00_charges-file")
# Reset the balance before walking the charges (the printer consumes it).
bal = 1000.00
print("\nSorted (by date) charges:")
print_charges_from_list(sorted(charges_list, key=itemgetter('date')), bal)
|
from typing import Dict
from gym.spaces import space
from malib.utils.episode import EpisodeKey
from tests.algorithm import AlgorithmTestMixin
from gym import spaces
import numpy as np
from malib.algorithm.mappo import CONFIG, MAPPO, MAPPOLoss, MAPPOTrainer
import os
import shutil
import pytest
# Pull the default MAPPO policy/training configs and force RNN usage on.
custom_config = CONFIG["policy"]
trainer_config = CONFIG["training"]
custom_config["use_rnn"] = True
# Minimal actor/critic MLPs (one 8-unit ReLU layer) to keep tests fast.
model_config = {
    "initialization": {
        "use_orthogonal": True,
        "gain": 1.0,
    },
    "actor": {
        "network": "mlp",
        "layers": [{"units": 8, "activation": "ReLU"}],
        "output": {"activation": False},
    },
    "critic": {
        "network": "mlp",
        "layers": [{"units": 8, "activation": "ReLU"}],
        "output": {"activation": False},
    },
}
# Tiny observation/action spaces for the fixtures below.
test_obs_shape = (3,)
test_action_dim = 2
@pytest.mark.parametrize("use_rnn", [True, False], scope="class")
@pytest.mark.parametrize("use_vtrace", [True, False], scope="class")
class TestMAPPO(AlgorithmTestMixin):
    """MAPPO algorithm tests, parametrized over RNN usage and vtrace/gae returns."""

    @pytest.fixture(autouse=True)
    def setUp(self, use_rnn, use_vtrace):
        # Build a fresh algorithm/trainer/loss triple for every parameter combo.
        self._algorithm_to_test = self.make_algorithm(use_rnn, use_vtrace)
        self._trainer_to_test, self._trainer_config = self.make_trainer_and_config()
        self._loss_to_test = self.make_loss()
        self._trainer_config.update({"optimizer": "Adam", "lr": 1e-3})

    def make_algorithm(self, *args):
        """Construct a MAPPO policy for the single test agent "agent_0"."""
        use_rnn, use_vtrace = args
        # NOTE(review): mutates the module-level custom_config shared by all tests.
        custom_config["global_state_space"] = {
            "agent_0": spaces.Box(low=0, high=1, shape=test_obs_shape)
        }
        custom_config["use_rnn"] = use_rnn
        custom_config["return_mode"] = "vtrace" if use_vtrace else "gae"
        return MAPPO(
            registered_name="MAPPO",
            observation_space=spaces.Box(low=0, high=1, shape=test_obs_shape),
            action_space=spaces.Discrete(n=test_action_dim),
            model_config=model_config,
            custom_config=custom_config,
            env_agent_id="agent_0",
        )

    def make_trainer_and_config(self):
        """Return the trainer under test and its (shared) config dict."""
        return MAPPOTrainer("test_trainer"), trainer_config

    def make_loss(self):
        """Return the loss object under test."""
        return MAPPOLoss()

    def build_env_inputs(self) -> Dict:
        """Batch of 4 rollout-time inputs; action mask allows only action 0."""
        action_mask = np.zeros((4, test_action_dim))
        action_mask[:, 0] = 1
        return {
            EpisodeKey.CUR_OBS: np.zeros((4,) + test_obs_shape),
            EpisodeKey.CUR_STATE: np.zeros((4,) + test_obs_shape),
            EpisodeKey.RNN_STATE: self.algorithm.get_initial_state(batch_size=4),
            EpisodeKey.DONE: np.zeros((4, 1)),
            EpisodeKey.ACTION_MASK: action_mask,
        }

    def build_train_inputs(self) -> Dict:
        """Zero-filled training batch shaped (batch, traj, agents, ...)."""
        n_agent, batch_size, traj_len = 4, 32, 100
        num_rnn_layer = custom_config["rnn_layer_num"]
        # Flat initial RNN states, reshaped into per-(batch, traj, agent) slots.
        rnn_states = self.algorithm.get_initial_state(
            batch_size=n_agent * batch_size * traj_len
        )
        actor_rnn_state = rnn_states[0].reshape(
            (batch_size, traj_len, n_agent, num_rnn_layer, -1)
        )
        critic_rnn_state = rnn_states[1].reshape(
            (batch_size, traj_len, n_agent, num_rnn_layer, -1)
        )
        return {
            EpisodeKey.CUR_OBS: np.zeros(
                (
                    batch_size,
                    traj_len,
                    n_agent,
                )
                + test_obs_shape
            ),
            EpisodeKey.CUR_STATE: np.zeros(
                (
                    batch_size,
                    traj_len,
                    n_agent,
                )
                + test_obs_shape
            ),
            EpisodeKey.DONE: np.zeros((batch_size, traj_len, n_agent, 1)),
            EpisodeKey.REWARD: np.zeros((batch_size, traj_len, n_agent, 1)),
            EpisodeKey.ACTION: np.zeros((batch_size, traj_len, n_agent, 1)),
            # Uniform action distribution over the discrete actions.
            EpisodeKey.ACTION_DIST: np.ones(
                (batch_size, traj_len, n_agent, test_action_dim)
            )
            / test_action_dim,
            EpisodeKey.STATE_VALUE: np.zeros((batch_size, traj_len, n_agent, 1)),
            "return": np.zeros((batch_size, traj_len, n_agent, 1)),
            EpisodeKey.ACTION_MASK: np.zeros((batch_size, traj_len, n_agent, 1)),
            EpisodeKey.RNN_STATE + "_0": actor_rnn_state,
            EpisodeKey.RNN_STATE + "_1": critic_rnn_state,
        }

    def test_dump_and_load(self):
        """Round-trip the policy through dump/load in a scratch directory."""
        dump_dir = "play"
        os.makedirs(dump_dir)
        self.algorithm.dump(dump_dir)
        MAPPO.load(dump_dir, env_agent_id="agent_0")
        shutil.rmtree(dump_dir)

    def test_value_function(self):
        """Smoke-test the value function on a training batch."""
        return self.algorithm.value_function(**self.build_train_inputs())

    def test_prepare(self):
        """Smoke-test the rollout/training mode switches."""
        self.algorithm.prep_rollout()
        self.algorithm.prep_training()
|
from django.db import models
from uuid import uuid4
from supersaver.constants import *
from country.models import Country
from source.models import DataSource
from common.models import Property
class Retailer (models.Model):
    """Retailer whose name is stored normalised to lower case (see save())."""

    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    # Normalised name in lower case
    name = models.CharField(max_length=256, null=False, blank=False, db_index=True)
    display_name = models.CharField(max_length=256, null=False, blank=False)
    site = models.URLField(max_length=256, null=True, blank=False)
    logo_url = models.URLField(max_length=256, null=True, blank=False)
    country = models.ForeignKey(Country, on_delete=models.PROTECT, null=False, related_name='retailers')
    datasource = models.ForeignKey(DataSource, on_delete=models.PROTECT, null=False, related_name='retailers')

    def save(self, *args, **kwargs):
        """Lower-case the name, then delegate to Model.save.

        Accepts positional args to match Django's ``save(self, *args, **kwargs)``
        signature — the original dropped positional arguments such as
        force_insert passed by callers.
        """
        self.name = self.name.lower()
        super().save(*args, **kwargs)

    def __repr__(self):
        return 'Retailer: id={0}, name={1}, display_name={2}, site={3}, logo={4}, country={5}, datasource={6}'\
            .format(self.pk, self.name, self.display_name,
                    self.site, self.logo_url, self.country_id, self.datasource_id)
class RetailerProperty (Property):
    """
    Retailer property bag.
    """
    retailer = models.ForeignKey(Retailer, on_delete=models.CASCADE, null=False, related_name='properties')

    def __repr__(self):
        # f-string formats the same fields in the same order as the original.
        return (f'RetailerProperty: id={self.pk}, retailer={self.retailer_id}, '
                f'name={self.name}, value={self.value}')
|
import argparse
import bottle
import logging
import os
import random
import re
import urllib
import urllib2
import pyhelix.spectator as spectator
class CodeRunner(object):
    """
    A class that will find nodes that run code and dispatch work

    NOTE(review): this module targets Python 2 (urllib2, urllib.urlencode).
    SECURITY: run_program forwards arbitrary user-submitted code to
    participant nodes — do not expose this service on an untrusted network.
    """
    def __init__(self, cluster, resource, host, port, zk_svr):
        """
        Initialize the CodeRunner
        Args:
            cluster: the cluster id
            resource: the resource id
            host: the hostname that this router can be reached on
            port: the port to use to send data
            zk_svr: host:port of a ZooKeeper server
        """
        self._conn = spectator.SpectatorConnection(cluster, zk_svr)
        # Spectator handle; populated by start().
        self._s = None
        self._resource = resource
        self._app = bottle.Bottle()
        self._host = host
        self._port = port
        self._route()
    def start(self):
        """
        Start the CodeRunner

        Blocks in bottle's server loop until the server is shut down.
        """
        self._conn.connect()
        self._s = self._conn.spectate(self._resource)
        self._app.run(host='0.0.0.0', port=self._port)
    def stop(self):
        """
        Stop the CodeRunner
        """
        self._conn.disconnect()
    def show_index(self):
        """
        Called when the home page is requested
        Returns:
            A page with a box to input code where to run it
        """
        participants = self._s.get_participants('ONLINE')
        participants = [p['id'] for p in participants]
        return bottle.template(
            'index', host=self._host, port=self._port,
            participants=participants)
    def run_program(self):
        """
        Called when a request with a program to run comes in
        Returns:
            stdout result of running the program
        """
        prog = str(bottle.request.forms.get('prog'))
        node = str(bottle.request.forms.get('participant'))
        logging.info('Selected participant(s): {0}'.format(node))
        # 'star' fans out to every participant; 'random' picks one online node.
        pattern = node
        if node == 'star':
            pattern = '.*'
        elif node == 'random':
            participants = self._s.get_participants('ONLINE')
            if len(participants) > 0:
                pattern = random.sample(participants, 1)[0]['id']
        outputs = self._run_on_nodes(prog, pattern)
        return bottle.template('result', results=outputs, prog=prog)
    def static_files(self, filename):
        """
        Static route to files
        Args:
            filename: name of the static file
        Returns:
            a file handle
        """
        path = os.path.dirname(os.path.realpath(__file__)) + '/static'
        return bottle.static_file(filename, root=path)
    def _run_on_nodes(self, prog, pattern):
        """
        Run a program on all machines that match a pattern.
        Args:
            prog: The text of the program
            pattern: The machine pattern
        Returns:
            The aggregate output
        """
        outputs = []
        pattern = re.compile(pattern, re.IGNORECASE)
        participants = self._s.get_participants('ONLINE')
        for participant in participants:
            if pattern.match(participant['id']):
                label = participant['id']
                host = participant['simpleFields']['HELIX_HOST']
                port = participant['simpleFields']['HELIX_PORT']
                values = {'prog': prog}
                data = urllib.urlencode(values)
                # Synchronous POST to each matching participant's /run endpoint.
                result = urllib2.urlopen(
                    'http://{0}:{1}/run'.format(host, port), data)
                output = label, result.read()
                outputs.append(output)
        return outputs
    def _route(self):
        """
        Route HTTP requests (private)
        """
        self._app.route('/', callback=self.show_index)
        self._app.route('/run', method='POST', callback=self.run_program)
        self._app.route('/static/<filename>', callback=self.static_files)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--zkSvr', required=True, type=str, help='host:port of ZooKeeper')
    parser.add_argument('--host', required=True, type=str, help='hostname')
    # Kept as str: bottle accepts a string port value.
    parser.add_argument('--port', required=True, type=str, help='port')
    args = parser.parse_args()
    r = CodeRunner(
        'coderunner-cluster', 'coderunner', args.host, args.port, args.zkSvr)
    # start() blocks in the HTTP server loop; stop() runs only after it exits.
    r.start()
    r.stop()
|
import logging
from aiohttp import web
from cen_uiu import assets
Logger = logging.getLogger(__name__)
# Directory containing the packaged web assets, served as static files.
STATIC_FOLDER = assets.__path__[0]
# Entry page returned for the root route.
INDEX_HTML = assets.__path__[0] + "/index.html"
async def index(request):
    """Serve the bundled index.html for the root route."""
    return web.FileResponse(INDEX_HTML)
def create_app() -> web.Application:
    """Build the aiohttp application serving the bundled UI assets.

    Returns:
        web.Application with "/" mapped to index.html and all other paths
        served from the static assets folder.
    """
    Logger.debug("creating web application")
    app = web.Application(logger=Logger)
    app.router.add_get("/", index, name="index")
    app.router.add_static("/", STATIC_FOLDER)
    # Removed stray `web.run_app` expression: it was a bare attribute access
    # with no call — a no-op leftover from an earlier version.
    return app
async def run_app(app: web.Application, host: str, port: int):
    """Start *app* on host:port and block until the server stops."""
    Logger.info(f"Starting http server on: {host}:{port}")
    runner = web.AppRunner(app, access_log=Logger)
    await runner.setup()
    site = web.TCPSite(runner, host, port)
    await site.start()
    # NOTE(review): site._server is a private aiohttp attribute, used here to
    # block forever after startup — confirm against the pinned aiohttp version.
    await site._server.serve_forever()
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, confusion_matrix
def main():
    """Scale the KNN project data, sweep k = 1..39, and refit at the best k.

    Reads 'KNN_Project_Data' from the working directory; prints evaluation
    reports and shows the error-rate-vs-k plot.
    """
    df = pd.read_csv('KNN_Project_Data')
    # Standardise all feature columns (target excluded from scaling).
    scaler = StandardScaler()
    data = df.drop('TARGET CLASS', axis=1)
    scaler.fit(data)
    scaled_features = scaler.transform(data)
    df_feat = pd.DataFrame(scaled_features, columns=df.columns[:-1])
    X_train, X_test, y_train, y_test = train_test_split(df_feat, df['TARGET CLASS'], test_size=0.3, random_state=101)
    # Baseline report with k = 1.
    remodel(1, X_train, X_test, y_train, y_test)
    max_k = 40
    error_rate = [get_errors(i, X_train, X_test, y_train, y_test)
                  for i in range(1, max_k)]
    plt.figure(figsize=(10, 6))
    plt.plot(range(1, max_k), error_rate, color='blue', linestyle='dashed', marker='o', markerfacecolor='red',
             markersize=10)
    plt.title('Error Rate vs K Value')
    plt.xlabel('K')
    plt.ylabel('Error Rate')
    plt.show()
    # FIX: error_rate[0] corresponds to k = 1, so the best k is argmin + 1.
    # The original used argmin directly, reporting an off-by-one k and
    # crashing (n_neighbors=0) whenever k = 1 was the minimum.
    min_k = int(np.argmin(error_rate)) + 1
    print("Minimum K = " + str(min_k))
    remodel(min_k, X_train, X_test, y_train, y_test)
def get_errors(i, X_Train, X_Test, y_train, y_test):
    """Return the misclassification rate of a k-NN model with k = i."""
    model = KNeighborsClassifier(n_neighbors=i)
    model.fit(X_Train, y_train)
    return np.mean(model.predict(X_Test) != y_test)
def remodel(k, X_train, X_test, y_train, y_test):
    """Fit a k-NN classifier with the given k and print evaluation reports."""
    classifier = KNeighborsClassifier(n_neighbors=k)
    classifier.fit(X_train, y_train)
    preds = classifier.predict(X_test)
    print("Confusion Matrix")
    print(confusion_matrix(y_test, preds))
    print("\nClassification Report:")
    print(classification_report(y_test, preds))
# Script entry point.
if __name__ == '__main__':
    main()
|
#restart
# Blank line for spacing before the prompts.
print('')
def introduc():
    """Print the opening prompt asking whether this is a restart or a load."""
    for prompt in ('Is this a restart?', 'or a load'):
        print(prompt)
def menu():
    """Show the numeric menu options for the input loop."""
    print('(0)to quit(1)Restart (2)Load')
def loopy():
    """Read menu choices in a loop: "0" quits, "1" restarts (resets the save
    files to defaults), "2" loads the previous save. Loops until "0".

    Fixes over the original: the first input() was read and immediately
    discarded (users had to type their choice twice); `import time` was
    repeated inside each branch; files are now closed via with-blocks; the
    never-false `while pl != None` is an explicit `while True`; typo in a
    user-facing message ("pervious" -> "previous").
    """
    import time  # hoisted: was re-imported inside each branch

    while True:
        pl = input()
        if pl == "":
            print("Quit trying to break things")
        elif pl == "0":
            print('Breaking')
            break
        elif pl == "1":
            # Restart: overwrite the save files with default contents.
            print('you have chosen to restart')
            print('')
            print('Rewriting previous save')
            print('Restarting now')
            with open("name.txt", "w+") as text_file:
                text_file.write(".")
            with open("curmov.txt", "w+") as text_file:
                text_file.write("0")
            time.sleep(3)
            print('Restart complete')
            print('Enjoy!')
            # renren.txt == "1" marks a fresh restart for the main game.
            with open("renren.txt", "w+") as text_file:
                text_file.write("1")
        elif pl == "2":
            print('you have chosen to load previous save')
            print('loading data')
            time.sleep(3)
            print('load complete')
            # renren.txt == "0" marks a loaded (not fresh) session.
            with open("renren.txt", "w+") as text_file:
                text_file.write("0")
            print('Enjoy!')
def main():
    """Run the restart/load flow: intro text, menu, then the input loop."""
    introduc()
    menu()
    loopy()
##introduc()
##menu()
##loopy()
##main()
|
from flask import request
from flask.ext.restful import Resource, Api, marshal_with, fields, abort
from flask_restful_swagger import swagger
from jira import JIRA
from .models import DummyResult
from .models import HelloResult
from .models import IssueResult
from .errors import JsonRequiredError
from .errors import JsonInvalidError
options = {"server": "https://testhomelol.atlassian.net"}
class DummyEndpoint(Resource):
    """Health-check endpoint returning a static DummyResult."""

    @swagger.operation(responseClass=DummyResult.__name__, nickname="dummy")
    @marshal_with(DummyResult.resource_fields)
    def get(self):
        """Return a DummyResult object
        Lightweight response to let us confirm that the server is on-line"""
        return DummyResult()
class GetIssueEndpoint(Resource):
    """Fetch a JIRA issue and expose its reporter's display name."""

    @swagger.operation(responseClass=IssueResult.__name__, nickname="issue")
    @marshal_with(IssueResult.resource_fields)
    def get(self, issue_id):
        """Return an IssueResult with the reporter name of *issue_id*.

        Fixes: the original ignored issue_id and always fetched "TEST-1";
        its docstring was also copy-pasted from DummyEndpoint.
        """
        # SECURITY: credentials are hard-coded in source and TLS verification
        # is disabled ("verify": False) — move the auth into configuration
        # and re-enable certificate checks.
        jira = JIRA(
            server="https://testhomelol.atlassian.net",
            options={"verify": False},
            basic_auth=("vovxox1@gmail.com", ""),
        )
        return IssueResult(name=jira.issue(issue_id).fields.reporter.displayName)
class HelloEndpoint(Resource):
    """POST endpoint that greets the caller named in the JSON body."""

    @swagger.operation(
        responseClass=HelloResult.__name__,
        nickname="hello",
        responseMessages=[
            {"code": 400, "message": "Input required"},
            {"code": 500, "message": "JSON format not valid"},
        ],
        parameters=[
            {
                "name": "name",
                "description": "JSON-encoded name",
                "required": True,
                "allowMultiple": False,
                "dataType": "string",
                "paramType": "body",
            }
        ],
    )
    @marshal_with(HelloResult.resource_fields)
    def post(self):
        """Return a HelloResult built from the JSON body's "name" field."""
        payload = request.get_json()
        if not payload:
            raise JsonRequiredError()
        try:
            name = payload["name"]
        except KeyError:
            raise JsonInvalidError()
        return HelloResult(name=name)
|
#!/usr/bin/env python
import argparse,textwrap
from igf_data.utils.singularity_run_wrapper import singularity_run
# Help text shown by argparse; dedent keeps the source readable.
description = textwrap.dedent(
"""
A script for running commands within a singularity container in HPC
USAGE:
python run_singularity_container.py
-i /path/SINGULARITY_IMAGE
-b /path/CONTAINER_MOUNTED_DIR
-a command
-a arg1
-a arg2
It will run the following singularity run command:
singularity run
--bind /path/CONTAINER_MOUNTED_DIR:/tmp
/path/SINGULARITY_IMAGE
command arg1 arg2
""")

def main():
    """Parse CLI arguments and run the requested command in the container.

    All argument parsing and side effects live here so that importing this
    module (e.g. for testing) no longer consumes sys.argv or exits on
    missing required flags, as the original module-level code did.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=description)
    parser.add_argument('-i', '--image_path', required=True, help='Singularity image path')
    parser.add_argument('-b', '--path_bind', required=True, help='Path to bind to singularity /tmp dir')
    parser.add_argument('-a', '--run_args', action='append', default=[], help='List of args for singularity run')
    args = parser.parse_args()
    try:
        res, singularity_run_cmd = \
            singularity_run(
                image_path=args.image_path,
                path_bind=args.path_bind,
                args_list=args.run_args)
    except Exception as e:
        # Chain the original exception so the root cause is preserved.
        raise ValueError("Failed to run singularity container, error: {0}".format(e)) from e

if __name__ == '__main__':
    main()
"""
(4) Modifique a função acima de maneira que ela receba mais um parâmetro, a extrem-
idade que se deseja juntar.
Exemplo:
>> l = [0,1,2,2,4,0]
>> funcao_auxiliar2(l, ’dir’) #nome generico nao use esse nome
[0,0,1,2,2,4]
>> funcao_auxiliar2(l, ’esq’)
[1,2,2,4,0,0] """
def mover_zeros(numeros, direcao):
    """Return a new list with every zero of *numeros* gathered at one end.

    Args:
        numeros: list of numbers; relative order of the non-zero (and of the
            zero) elements is preserved.
        direcao: "esquerda" puts the zeros first, "direita" puts them last.

    Returns:
        A new list for a known *direcao*; for any other value the input list
        is returned unchanged (original behavior, kept for compatibility).
    """
    # Partition once instead of duplicating the loop in each branch.
    zeros = [n for n in numeros if n == 0]
    outros = [n for n in numeros if n != 0]
    if direcao == "esquerda":
        return zeros + outros
    if direcao == "direita":
        return outros + zeros
    return numeros

# The original call had no arguments and raised TypeError; demo with the
# example input from the exercise statement.
print(mover_zeros([0, 1, 2, 2, 4, 0], "direita"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.