| hexsha (stringlengths 40..40) | size (int64 7..1.04M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 4..247) | max_stars_repo_name (stringlengths 4..125) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..368k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 4..247) | max_issues_repo_name (stringlengths 4..125) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 4..247) | max_forks_repo_name (stringlengths 4..125) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 1..1.04M) | avg_line_length (float64 1.77..618k) | max_line_length (int64 1..1.02M) | alphanum_fraction (float64 0..1) | original_content (stringlengths 7..1.04M) | filtered:remove_function_no_docstring (int64 -102..942k) | filtered:remove_class_no_docstring (int64 -354..977k) | filtered:remove_delete_markers (int64 0..60.1k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
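Each data row below pairs the raw file contents (`original_content`) with a filtered copy (`content`), alongside repo metadata and per-stage counters for the filtering passes (the `filtered:*` columns). As a minimal sketch of how these columns can be queried, assuming the table is available as a Hugging Face dataset (the dataset id below is a placeholder, not the real one):

```python
from datasets import load_dataset  # assumes the `datasets` library is installed

# "user/python-code-filtered" is a placeholder id, not the actual dataset repository.
ds = load_dataset("user/python-code-filtered", split="train", streaming=True)

# Example query: small Python files that none of the filtering stages touched.
for row in ds:
    untouched = (
        row["filtered:remove_function_no_docstring"] == 0
        and row["filtered:remove_class_no_docstring"] == 0
        and row["filtered:remove_delete_markers"] == 0
    )
    if row["ext"] == "py" and row["size"] < 10_000 and untouched:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
        break
```

`streaming=True` avoids materializing the whole table up front, which matters here since `content` cells can reach 1.04M characters.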
4f3fd3a39f6f8344f7edaf6e766a2be377b027be
| 1,561
|
py
|
Python
|
ooobuild/lo/animations/animation_transform_type.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/animations/animation_transform_type.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/animations/animation_transform_type.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is an auto-generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.animations
class AnimationTransformType(object):
"""
Const Class
Specifies the transformation type for an XAnimateTransform.
See Also:
`API AnimationTransformType <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1animations_1_1AnimationTransformType.html>`_
"""
__ooo_ns__: str = 'com.sun.star.animations'
__ooo_full_ns__: str = 'com.sun.star.animations.AnimationTransformType'
__ooo_type_name__: str = 'const'
TRANSLATE = 0
"""
defines a translation
"""
SCALE = 1
"""
defines a scale
"""
ROTATE = 2
"""
defines a rotation
"""
SKEWX = 3
"""
defines a skew transformation for x-axis
"""
SKEWY = 4
"""
defines a skew transformation for y-axis
"""
__all__ = ['AnimationTransformType']
| 26.913793
| 153
| 0.696989
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is an auto-generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.animations
class AnimationTransformType(object):
"""
Const Class
Specifies the transformation type for an XAnimateTransform.
See Also:
`API AnimationTransformType <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1animations_1_1AnimationTransformType.html>`_
"""
__ooo_ns__: str = 'com.sun.star.animations'
__ooo_full_ns__: str = 'com.sun.star.animations.AnimationTransformType'
__ooo_type_name__: str = 'const'
TRANSLATE = 0
"""
defines a translation
"""
SCALE = 1
"""
defines a scale
"""
ROTATE = 2
"""
defines a rotation
"""
SKEWX = 3
"""
defines a skew transformation for x-axis
"""
SKEWY = 4
"""
defines a skew transformation for y-axis
"""
__all__ = ['AnimationTransformType']
| 0
| 0
| 0
|
4493ab87a277ae9aeceaa08ea8afc483f266aa67
| 1,709
|
py
|
Python
|
algorithm_analysis/highest_value_palindrome/benchmarking.py
|
gshaikov/python-uprojects
|
79b92d600ecfb71e78731982894238fd12fcd348
|
[
"MIT"
] | null | null | null |
algorithm_analysis/highest_value_palindrome/benchmarking.py
|
gshaikov/python-uprojects
|
79b92d600ecfb71e78731982894238fd12fcd348
|
[
"MIT"
] | null | null | null |
algorithm_analysis/highest_value_palindrome/benchmarking.py
|
gshaikov/python-uprojects
|
79b92d600ecfb71e78731982894238fd12fcd348
|
[
"MIT"
] | null | null | null |
import random
import timeit
import time
import re
from pathlib import Path
from typing import Tuple
from solution import (
highest_value_palindrome_suboptimal,
highest_value_palindrome_optimal,
)
TEST_DIR = Path(__file__).parent
random.seed(13)
def run(data_length):
"""
*data_length* is n in O(n)
Example output:
Loops: 500, time per loop: 0.470337 ms
"""
big_number_str = str(random.randrange(data_length, data_length * 10))
big_number_len = len(big_number_str)
benchmark(benched_function)
| 29.465517
| 79
| 0.697484
|
import random
import timeit
import time
import re
from pathlib import Path
from typing import Tuple
from solution import (
highest_value_palindrome_suboptimal,
highest_value_palindrome_optimal,
)
TEST_DIR = Path(__file__).parent
random.seed(13)
def benchmark(benched_function, number=None) -> Tuple[int, int]:
timer = timeit.Timer(benched_function)
if number is None:
n_loops, time_taken = timer.autorange()
    else:
        n_loops = number
        time_taken = timer.timeit(number=number)
time_per_loop_ms = round(time_taken / n_loops * 1000, 6)
print("Loops: {}, time per loop: {} ms".format(n_loops, time_per_loop_ms))
return n_loops, time_per_loop_ms
def run(data_length):
"""
*data_length* is n in O(n)
Example output:
Loops: 500, time per loop: 0.470337 ms
"""
big_number_str = str(random.randrange(data_length, data_length * 10))
big_number_len = len(big_number_str)
def benched_function():
return highest_value_palindrome_optimal(big_number_str, big_number_len)
benchmark(benched_function)
def run_bigfile():
with (TEST_DIR / "big_test_case" / "bigfile.txt").open() as bigfile:
input_number = bigfile.readline()
input_number = re.search("[0-9]+", input_number).group(0)
with (TEST_DIR / "big_test_case" / "bigfile_answer.txt").open() as bigfile:
output_number = bigfile.readline()
output_number = re.search("[0-9]+", output_number).group(0)
start = time.perf_counter()
result = highest_value_palindrome_optimal(input_number, 21724)
end = time.perf_counter()
assert result == output_number
print("Length: {}, time: {}".format(len(input_number), end - start))
| 1,090
| 0
| 73
|
eb5d5d1319eb1558645cba95437f6f3e36e1bc65
| 3,117
|
py
|
Python
|
lib/utils.py
|
SudeepSarkar/equilibrium-propagation
|
ba6d9ee5426445e9ad91c96c816fa5287ff97258
|
[
"MIT"
] | 18
|
2020-01-07T11:25:45.000Z
|
2022-01-24T18:25:13.000Z
|
lib/utils.py
|
SudeepSarkar/equilibrium-propagation
|
ba6d9ee5426445e9ad91c96c816fa5287ff97258
|
[
"MIT"
] | null | null | null |
lib/utils.py
|
SudeepSarkar/equilibrium-propagation
|
ba6d9ee5426445e9ad91c96c816fa5287ff97258
|
[
"MIT"
] | 5
|
2020-06-26T23:37:22.000Z
|
2021-09-04T12:22:49.000Z
|
# MIT License
# Copyright (c) 2020 Simon Schug, João Sacramento
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
from lib import cost
def create_activations(name, n_layers):
"""
Create activation functions for every layer of the network.
Args:
name: Name of the activation function
n_layers: Number of layers
Returns:
List of activation functions for every layer
"""
if name == 'relu':
phi_l = torch.relu
elif name == "leaky_relu":
elif name == 'softplus':
phi_l = torch.nn.functional.softplus
elif name == 'sigmoid':
phi_l = torch.sigmoid
elif name == 'hard_sigmoid':
else:
raise ValueError(f'Nonlinearity \"{name}\" not defined.')
return [lambda x: x] + [phi_l] * (n_layers - 1)
def create_cost(name, beta):
"""
Create a supervised learning cost function used to nudge
the network towards a desired state during training.
Args:
name: Name of the cost function
beta: Scalar weighting factor of the cost function
Returns:
CEnergy object
"""
if name == "squared_error":
return cost.SquaredError(beta)
elif name == "cross_entropy":
return cost.CrossEntropy(beta)
else:
raise ValueError("Cost function \"{}\" not defined".format(name))
def create_optimizer(model, name, **kwargs):
"""
Create optimizer for the given model.
Args:
model: nn.Module whose parameters will be optimized
name: Name of the optimizer to be used
Returns:
torch.optim.Optimizer instance for the given model
"""
if name == "adagrad":
return torch.optim.Adagrad(model.parameters(), **kwargs)
elif name == "adam":
return torch.optim.Adam(model.parameters(), **kwargs)
elif name == "sgd":
return torch.optim.SGD(model.parameters(), **kwargs)
else:
raise ValueError("Optimizer \"{}\" undefined".format(name))
| 33.159574
| 80
| 0.68367
|
# MIT License
# Copyright (c) 2020 Simon Schug, João Sacramento
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
from lib import cost
def create_activations(name, n_layers):
"""
Create activation functions for every layer of the network.
Args:
name: Name of the activation function
n_layers: Number of layers
Returns:
List of activation functions for every layer
"""
if name == 'relu':
phi_l = torch.relu
elif name == "leaky_relu":
        def phi_l(x): return torch.nn.functional.leaky_relu(x, negative_slope=0.05)
elif name == 'softplus':
phi_l = torch.nn.functional.softplus
elif name == 'sigmoid':
phi_l = torch.sigmoid
elif name == 'hard_sigmoid':
        def phi_l(x): return torch.clamp(x, min=0, max=1)
else:
raise ValueError(f'Nonlinearity \"{name}\" not defined.')
return [lambda x: x] + [phi_l] * (n_layers - 1)
def create_cost(name, beta):
"""
Create a supervised learning cost function used to nudge
the network towards a desired state during training.
Args:
name: Name of the cost function
beta: Scalar weighting factor of the cost function
Returns:
CEnergy object
"""
if name == "squared_error":
return cost.SquaredError(beta)
elif name == "cross_entropy":
return cost.CrossEntropy(beta)
else:
raise ValueError("Cost function \"{}\" not defined".format(name))
def create_optimizer(model, name, **kwargs):
"""
Create optimizer for the given model.
Args:
model: nn.Module whose parameters will be optimized
name: Name of the optimizer to be used
Returns:
torch.optim.Optimizer instance for the given model
"""
if name == "adagrad":
return torch.optim.Adagrad(model.parameters(), **kwargs)
elif name == "adam":
return torch.optim.Adam(model.parameters(), **kwargs)
elif name == "sgd":
return torch.optim.SGD(model.parameters(), **kwargs)
else:
raise ValueError("Optimizer \"{}\" undefined".format(name))
| 68
| 0
| 60
|
53eab20428f2bfc80ff63f633f203e5206010ba6
| 8,558
|
py
|
Python
|
Lib/glyphsLib/writer.py
|
jenskutilek/glyphsLib
|
9271c32e5135dbaebfb852c3d40246967fbe232d
|
[
"Apache-2.0"
] | 80
|
2019-04-17T19:30:16.000Z
|
2022-03-25T17:00:48.000Z
|
Lib/glyphsLib/writer.py
|
jenskutilek/glyphsLib
|
9271c32e5135dbaebfb852c3d40246967fbe232d
|
[
"Apache-2.0"
] | 227
|
2019-04-19T07:03:21.000Z
|
2022-03-26T21:06:25.000Z
|
Lib/glyphsLib/writer.py
|
jenskutilek/glyphsLib
|
9271c32e5135dbaebfb852c3d40246967fbe232d
|
[
"Apache-2.0"
] | 22
|
2019-05-15T15:13:24.000Z
|
2022-03-17T09:12:16.000Z
|
# Copyright 2016 Georg Seifert. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glyphsLib.classes
from glyphsLib.types import floatToString5
import logging
import datetime
from collections import OrderedDict
from io import StringIO
"""
Usage
>> fp = open('Path/to/File.glyphs', 'w')
>> writer = Writer(fp)
>> writer.write(font)
>> fp.close()
"""
logger = logging.getLogger(__name__)
def dump(obj, fp):
"""Write a GSFont object to a .glyphs file.
'fp' should be a (writable) file object.
"""
writer = Writer(fp)
logger.info("Writing .glyphs file")
if hasattr(obj, "format_version"):
writer.format_version = obj.format_version
writer.write(obj)
def dumps(obj):
"""Serialize a GSFont object to a .glyphs file format.
Return a (unicode) str object.
"""
fp = StringIO()
dump(obj, fp)
return fp.getvalue()
NSPropertyListNameSet = (
# 0
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
# 16
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
# 32
False,
False,
False,
False,
True,
False,
False,
False,
False,
False,
False,
False,
False,
False,
True,
False,
# 48
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
False,
False,
False,
False,
False,
False,
# 64
False,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
# 80
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
False,
False,
False,
False,
True,
# 96
False,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
# 112
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
False,
False,
False,
False,
False,
)
| 23.255435
| 86
| 0.534704
|
# Copyright 2016 Georg Seifert. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glyphsLib.classes
from glyphsLib.types import floatToString5
import logging
import datetime
from collections import OrderedDict
from io import StringIO
"""
Usage
>> fp = open('Path/to/File.glyphs', 'w')
>> writer = Writer(fp)
>> writer.write(font)
>> fp.close()
"""
logger = logging.getLogger(__name__)
class Writer:
def __init__(self, fp, format_version=2):
# figure out whether file object expects bytes or unicodes
try:
fp.write(b"")
except TypeError:
fp.write("") # this better not fail...
# file already accepts unicodes; use it directly
self.file = fp
else:
# file expects bytes; wrap it in a UTF-8 codecs.StreamWriter
import codecs
self.file = codecs.getwriter("utf-8")(fp)
self.format_version = format_version
def write(self, rootObject):
self.writeDict(rootObject)
self.file.write("\n")
def writeDict(self, dictValue):
if hasattr(dictValue, "_serialize_to_plist"):
self.file.write("{\n")
dictValue._serialize_to_plist(self)
self.file.write("}")
return
self.file.write("{\n")
keys = dictValue.keys()
if not isinstance(dictValue, OrderedDict):
keys = sorted(keys)
for key in keys:
try:
if isinstance(dictValue, (dict, OrderedDict)):
value = dictValue[key]
else:
getKey = key
value = getattr(dictValue, getKey)
except AttributeError:
continue
if value is None:
continue
self.writeKeyValue(key, value)
self.file.write("}")
def writeArray(self, arrayValue):
self.file.write("(\n")
idx = 0
length = len(arrayValue)
if hasattr(arrayValue, "plistArray"):
arrayValue = arrayValue.plistArray()
for value in arrayValue:
self.writeValue(value)
if idx < length - 1:
self.file.write(",\n")
else:
self.file.write("\n")
idx += 1
self.file.write(")")
def writeUserData(self, userDataValue):
self.file.write("{\n")
keys = sorted(userDataValue.keys())
for key in keys:
value = userDataValue[key]
self.writeKey(key)
self.writeValue(value, key)
self.file.write(";\n")
self.file.write("}")
def writeKeyValue(self, key, value):
self.writeKey(key)
self.writeValue(value, key)
self.file.write(";\n")
def writeObjectKeyValue(self, d, key, condition=None, keyName=None, default=None):
value = getattr(d, key)
if condition == "if_true":
condition = bool(value)
if condition is None:
if default is not None:
condition = value != default
else:
condition = value is not None
if condition:
self.writeKey(keyName or key)
self.writeValue(value, key)
self.file.write(";\n")
def writeValue(self, value, forKey=None):
if hasattr(value, "plistValue"):
value = value.plistValue(format_version=self.format_version)
if value is not None:
self.file.write(value)
elif forKey in ["color", "strokeColor"] and hasattr(value, "__iter__"):
# We have to write color tuples on one line or Glyphs 2.4.x
# misreads it.
if self.format_version == 2:
self.file.write(str(tuple(value)))
else:
self.file.write("(")
for ix, v in enumerate(value):
self.file.write(str(v))
if ix < len(value) - 1:
self.file.write(",")
self.file.write(")")
elif isinstance(value, (list, glyphsLib.classes.Proxy)):
if isinstance(value, glyphsLib.classes.UserDataProxy):
self.writeUserData(value)
else:
self.writeArray(value)
elif isinstance(value, (dict, OrderedDict, glyphsLib.classes.GSBase)):
self.writeDict(value)
elif type(value) == float:
self.file.write(floatToString5(value))
elif type(value) == int:
self.file.write(str(value))
elif type(value) == bytes:
self.file.write("<" + value.hex() + ">")
elif type(value) == bool:
if value:
self.file.write("1")
else:
self.file.write("0")
elif type(value) == datetime.datetime:
self.file.write('"%s +0000"' % str(value))
else:
value = str(value)
if self.format_version < 3:
if forKey != "unicode":
value = escape_string(value)
else:
if _needs_quotes(value) or " " in value:
value = '"%s"' % value
self.file.write(value)
def writeKey(self, key):
key = escape_string(key)
self.file.write("%s = " % key)
def dump(obj, fp):
"""Write a GSFont object to a .glyphs file.
'fp' should be a (writable) file object.
"""
writer = Writer(fp)
logger.info("Writing .glyphs file")
if hasattr(obj, "format_version"):
writer.format_version = obj.format_version
writer.write(obj)
def dumps(obj):
"""Serialize a GSFont object to a .glyphs file format.
Return a (unicode) str object.
"""
fp = StringIO()
dump(obj, fp)
return fp.getvalue()
NSPropertyListNameSet = (
# 0
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
# 16
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
# 32
False,
False,
False,
False,
True,
False,
False,
False,
False,
False,
False,
False,
False,
False,
True,
False,
# 48
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
False,
False,
False,
False,
False,
False,
# 64
False,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
# 80
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
False,
False,
False,
False,
True,
# 96
False,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
# 112
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
False,
False,
False,
False,
False,
)
def _needs_quotes(string):
if len(string) == 0:
return True
# Does it need quotes because of special characters?
for c in string:
d = ord(c)
if d >= 128 or not NSPropertyListNameSet[d]:
return True
# Does it need quotes because it could be confused with a number?
try:
int(string)
except ValueError:
pass
else:
return True
try:
float(string)
except ValueError:
return False
else:
return True
def escape_string(string):
if _needs_quotes(string):
string = string.replace("\\", "\\\\")
string = string.replace('"', '\\"')
string = string.replace("\n", "\\012")
string = '"%s"' % string
return string
| 5,392
| -8
| 311
|
9d08b2f99ac10d617f01d45314d7e6c4e254f812
| 1,890
|
py
|
Python
|
main.py
|
mallocc/PythonRPG
|
6f8ae053f61d1b5070706276b850e96a27a10fe3
|
[
"Apache-2.0"
] | 1
|
2019-11-11T10:01:59.000Z
|
2019-11-11T10:01:59.000Z
|
main.py
|
mallocc/PythonRPG
|
6f8ae053f61d1b5070706276b850e96a27a10fe3
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
mallocc/PythonRPG
|
6f8ae053f61d1b5070706276b850e96a27a10fe3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import pygame
import sys
import os
from pygame.locals import *
from context import *
from imagemanager import *
from sprite import *
from animation import *
from entity import *
from player import *
from npc import *
from random import randint
game = Game()
game.addEntity(NPC((0, 0), game.context.imageManager.getAnimationDup(
"greenSprite"), False, True))
game.addEntity(
NPC((0, 0), game.context.imageManager.getAnimationDup("redSprite"), False))
### Main loop ###
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
game.update()
game.draw()
#################
| 25.540541
| 79
| 0.592063
|
#!/usr/bin/python
import pygame
import sys
import os
from pygame.locals import *
from context import *
from imagemanager import *
from sprite import *
from animation import *
from entity import *
from player import *
from npc import *
from random import randint
class Game:
def __init__(self):
self.context = Context(MAGENTA, R600, 60, "Game")
self.room = (0, 0)
self.player = Player(
(0, 0), self.context.imageManager.getAnimationDup("redSprite"))
self.entities = []
def addEntity(self, entity):
self.entities.append(entity)
def update(self):
keys = pygame.key.get_pressed()
if keys[K_UP]:
self.player.vel = (self.player.vel[0], self.player.vel[1] - 1)
if keys[K_DOWN]:
self.player.vel = (self.player.vel[0], self.player.vel[1] + 1)
if keys[K_RIGHT]:
self.player.vel = (self.player.vel[0] + 1, self.player.vel[1])
if keys[K_LEFT]:
self.player.vel = (self.player.vel[0] - 1, self.player.vel[1])
self.player.update(0.85)
for ent in self.entities:
ent.update(0.85)
if isinstance(ent, NPC):
ent.npcUpdate(self.player)
def draw(self):
self.context.startDraw()
self.context.drawMap((11, 11))
self.player.draw(self.context)
for ent in self.entities:
ent.draw(self.context)
self.context.endDraw()
game = Game()
game.addEntity(NPC((0, 0), game.context.imageManager.getAnimationDup(
"greenSprite"), False, True))
game.addEntity(
NPC((0, 0), game.context.imageManager.getAnimationDup("redSprite"), False))
### Main loop ###
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
game.update()
game.draw()
#################
| 1,088
| -10
| 130
|
c9f228e98789ae2415e7a262bb24aaee24aa5133
| 602
|
py
|
Python
|
kisensum/openadr/openadr/vtn/migrations/0047_auto_20171122_1739.py
|
ChargePoint/volttron-applications
|
8d99c01a93f7c1ea98d4e4b0cfcefe85fe26320b
|
[
"BSD-3-Clause"
] | null | null | null |
kisensum/openadr/openadr/vtn/migrations/0047_auto_20171122_1739.py
|
ChargePoint/volttron-applications
|
8d99c01a93f7c1ea98d4e4b0cfcefe85fe26320b
|
[
"BSD-3-Clause"
] | 4
|
2021-03-19T23:36:34.000Z
|
2021-12-13T19:45:54.000Z
|
kisensum/openadr/openadr/vtn/migrations/0047_auto_20171122_1739.py
|
ChargePoint/volttron-applications
|
8d99c01a93f7c1ea98d4e4b0cfcefe85fe26320b
|
[
"BSD-3-Clause"
] | 1
|
2020-05-25T05:03:55.000Z
|
2020-05-25T05:03:55.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-22 17:39
from __future__ import unicode_literals
from django.db import migrations
| 25.083333
| 89
| 0.61794
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-22 17:39
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('vtn', '0046_remove_site_reporting_status'),
]
operations = [
migrations.AlterModelOptions(
name='drprogram',
options={'verbose_name': 'DR Program', 'verbose_name_plural': 'DR Programs'},
),
migrations.AlterModelOptions(
name='telemetry',
options={'verbose_name_plural': 'Telemetry'},
),
]
| 0
| 431
| 23
|
d71492cb94fa7ad72d79c38a0e0a8119462e9e42
| 5,464
|
py
|
Python
|
data_processing/src/data_processor.py
|
BarrySunderland/HackZurich2021
|
860b3a9ad1c1ec59343429d9489edc3d4a2b09b0
|
[
"MIT"
] | null | null | null |
data_processing/src/data_processor.py
|
BarrySunderland/HackZurich2021
|
860b3a9ad1c1ec59343429d9489edc3d4a2b09b0
|
[
"MIT"
] | null | null | null |
data_processing/src/data_processor.py
|
BarrySunderland/HackZurich2021
|
860b3a9ad1c1ec59343429d9489edc3d4a2b09b0
|
[
"MIT"
] | null | null | null |
import pandas as pd
import tqdm
from paths import PATHS
import os
if __name__ == "__main__":
main()
| 33.521472
| 98
| 0.57284
|
import pandas as pd
import tqdm
from paths import PATHS
import os
class DataProcessor:
@staticmethod
def gen_proc_file_name(file_name):
return os.path.join(PATHS.data, "processed", file_name)
@staticmethod
def add_rssi_fields_to_disruptions(save: bool = True):
"""This methods perform inner join on disruptions with the rssi values.
This results in obtaining values in rssi table for events of interest.
Returns:
[type]: [description]
"""
        # RSSI data is too big to load on the CPU all at once.
rssi_df_s = pd.read_csv(
PATHS.rssi,
chunksize=10000,
iterator=True,
infer_datetime_format=True,
parse_dates=True,
)
disruption_df = pd.read_csv(
PATHS.disruption, infer_datetime_format=True, parse_dates=True
)
merged_dfs = []
with tqdm.tqdm(total=3098) as pbar:
for rssi_df in rssi_df_s:
merged_dfs.append(pd.merge(rssi_df, disruption_df, on="DateTime"))
pbar.update(1)
result_df = pd.concat(merged_dfs)
if save:
result_df.to_csv(DataProcessor.gen_proc_file_name("disruption.csv"))
return result_df
@staticmethod
def add_rssi_fields_to_events(save: bool = True):
"""This methods perform inner join on disruptions with the rssi values.
This results in obtaining values in rssi table for events of interest.
Returns:
[type]: [description]
"""
        # RSSI data is too big to load on the CPU all at once.
rssi_df_s = pd.read_csv(
PATHS.rssi,
chunksize=10000,
iterator=True,
infer_datetime_format=True,
parse_dates=True,
)
events_df = pd.read_csv(
PATHS.events, infer_datetime_format=True, parse_dates=True
)
merged_dfs = []
with tqdm.tqdm(total=3098) as pbar:
for rssi_df in rssi_df_s:
merged_dfs.append(pd.merge(rssi_df, events_df, on="DateTime"))
pbar.update(1)
result_df = pd.concat(merged_dfs)
if save:
result_df.to_csv(DataProcessor.gen_proc_file_name("events.csv"))
return pd.concat(merged_dfs)
@staticmethod
def combine_events(save: bool = False):
"""This methods combines "A1_TotalTel","A1_ValidTel","A2_RSSI","A2_TotalTel","A2_ValidTel"
for a day and location based on day and distance.
"""
        # RSSI data is too big to load on the CPU all at once.
column_of_interest = [
"PositionNoLeap",
"Date",
"Latitude",
"Longitude",
"A1_TotalTel",
"A1_ValidTel",
"A2_RSSI",
"A2_TotalTel",
"A2_ValidTel",
]
rssi_df_s = pd.read_csv(
PATHS.rssi,
chunksize=10000,
iterator=True,
infer_datetime_format=True,
parse_dates=True,
)
merged_dfs = []
with tqdm.tqdm(total=3098) as pbar:
for rssi_df in rssi_df_s:
rssi_df["Date"] = pd.to_datetime(rssi_df["DateTime"]).dt.date
merged_dfs.append(
rssi_df[column_of_interest]
.groupby(by=["Date", "PositionNoLeap"])
.mean()
)
pbar.update(1)
result_df = pd.concat(merged_dfs)
if save:
result_df.to_csv(DataProcessor.gen_proc_file_name("rssi.csv"))
return result_df
@staticmethod
def make_positional_mapping(save=True):
column_of_interest = ["PositionNoLeap", "Latitude", "Longitude"]
rssi_df_s = pd.read_csv(
PATHS.rssi,
chunksize=10000,
iterator=True,
infer_datetime_format=True,
parse_dates=True,
)
merged_dfs = []
with tqdm.tqdm(total=3098) as pbar:
for rssi_df in rssi_df_s:
temp = (
rssi_df[column_of_interest]
.groupby(by=["PositionNoLeap"])
.mean()
.reset_index()
)
temp["Position_m"] = temp["PositionNoLeap"] // 1000
temp = temp.groupby(by=["Position_m"]).mean()
merged_dfs.append(temp)
pbar.update(1)
# break
result_df = pd.concat(merged_dfs)
result_df = (
result_df.reset_index()
.drop_duplicates("Position_m")
.set_index("Position_m")
)
if save:
result_df.to_csv(DataProcessor.gen_proc_file_name("location.csv"))
return result_df
def main():
print("1. Adding RSSI values to disruption")
disruption_path = DataProcessor.gen_proc_file_name("disruption.csv")
location_path = DataProcessor.gen_proc_file_name("location.csv")
if not os.path.exists(disruption_path):
DataProcessor.add_rssi_fields_to_disruptions()
else:
print("skipping disruption processing, file already exists!")
print("2. Mapping locations to coordinates")
if not os.path.exists(location_path):
DataProcessor.make_positional_mapping()
else:
print("skipping location processing, file already exists!")
if __name__ == "__main__":
main()
| 1,791
| 3,519
| 46
|
1aff004c20676f19d8e2025960660d5b01dc9a38
| 6,688
|
py
|
Python
|
lib/kb_escher/kb_escher_utils.py
|
janakagithub/kb_escher
|
023c9d4d8c74d70a419e0e8dc9ed264b8cc731e4
|
[
"MIT"
] | null | null | null |
lib/kb_escher/kb_escher_utils.py
|
janakagithub/kb_escher
|
023c9d4d8c74d70a419e0e8dc9ed264b8cc731e4
|
[
"MIT"
] | null | null | null |
lib/kb_escher/kb_escher_utils.py
|
janakagithub/kb_escher
|
023c9d4d8c74d70a419e0e8dc9ed264b8cc731e4
|
[
"MIT"
] | null | null | null |
import json
import escher
import modelseed_escher
import cobrakbase
from cobrakbase.core.converters import KBaseFBAModelToCobraBuilder
from modelseed_escher.convert_utils import move_to_compartment
#{'workspace_id': 37534, 'map_ids': ['dfsdfsdf'], 'grid_x': '1', 'grid_y': '1', 'model_objects': [{'object_ids': ['37534/91/1'], 'model_id': '37534/90/3'}, {'object_ids': ['37534/92/1'], 'model_id': '37534/89/1'}], 'report_height': '800', 'workspace_name': 'filipeliu:narrative_1580723870549', 'model_ids': ['37534/90/3', '37534/89/1']}
| 38.217143
| 336
| 0.619767
|
import json
import escher
import modelseed_escher
import cobrakbase
import cobra  # cobra.io.to_json is used below when serializing the converted models
from cobrakbase.core.converters import KBaseFBAModelToCobraBuilder
from modelseed_escher.convert_utils import move_to_compartment
#{'workspace_id': 37534, 'map_ids': ['dfsdfsdf'], 'grid_x': '1', 'grid_y': '1', 'model_objects': [{'object_ids': ['37534/91/1'], 'model_id': '37534/90/3'}, {'object_ids': ['37534/92/1'], 'model_id': '37534/89/1'}], 'report_height': '800', 'workspace_name': 'filipeliu:narrative_1580723870549', 'model_ids': ['37534/90/3', '37534/89/1']}
def dump_chemical_abundance_datasets(model_id, datasets, map_decorators):
for dataset_id in datasets:
if not model_id in map_decorators:
map_decorators[model_id] = {}
if not 'compound' in map_decorators[model_id]:
map_decorators[model_id]['compound'] = {}
if not 'chemical_abundance' in map_decorators[model_id]['compound']:
map_decorators[model_id]['compound']['chemical_abundance'] = []
filename = '{}_{}.json'.format(model_id, dataset_id)
filepath = '../data/html/data/datasets/{}'.format(filename)
map_decorators[model_id]['compound']['chemical_abundance'].append({
'id' : dataset_id,
'path' : filename
})
with open(filepath, 'w') as fh:
fh.write(json.dumps(datasets[dataset_id]))
return map_decorators
def dump_expression_datasets(model_id, datasets, map_decorators):
for dataset_id in datasets:
if not model_id in map_decorators:
map_decorators[model_id] = {}
if not 'gene' in map_decorators[model_id]:
map_decorators[model_id]['gene'] = {}
if not 'expression' in map_decorators[model_id]['gene']:
map_decorators[model_id]['gene']['expression'] = []
filename = '{}_{}.json'.format(model_id, dataset_id)
filepath = '../data/html/data/datasets/{}'.format(filename)
map_decorators[model_id]['gene']['expression'].append({
'id' : dataset_id,
'path' : filename
})
with open(filepath, 'w') as fh:
fh.write(json.dumps(datasets[dataset_id]))
return map_decorators
def get_chemical_abundance_data(data, dataset, mapping):
col_index = None
result = {}
try:
col_index = data['data']['col_ids'].index(dataset)
except ValueError:
return None
for i in range(len(data['data']['row_ids'])):
k = data['data']['row_ids'][i]
v = data['data']['values'][i][col_index]
if not v == None:
if k in mapping['instances'] and len(mapping['instances'][k][5]) > 0:
result[mapping['instances'][k][5]] = v
else:
result[k] = v
return result
def get_all_chemical_abundance_data(data, mapping):
result = {}
for dataset in data['data']['col_ids']:
chemical_abundance = get_chemical_abundance_data(data, dataset, mapping)
if len(chemical_abundance) > 0:
result[dataset] = chemical_abundance
return result
def get_expression_data(data, dataset):
col_index = None
result = {}
try:
col_index = data['data']['col_ids'].index(dataset)
except ValueError:
return None
for i in range(len(data['data']['row_ids'])):
k = data['data']['row_ids'][i]
v = data['data']['values'][i][col_index]
result[k] = v
return result
def get_all_expression_data(data):
result = {}
for dataset in data['data']['col_ids']:
result[dataset] = get_expression_data(data, dataset)
return result
def scan_content(map_data, fbamodel, fba):
map_cpd_set = set()
map_rxn_set = set()
for node_uid in map_data.nodes:
node = map_data.escher_graph['nodes'][node_uid]
if node['node_type'] == 'metabolite':
map_cpd_set.add(node['bigg_id'])
for rxn_uid in map_data.escher_graph['reactions']:
rxn_node = map_data.escher_graph['reactions'][rxn_uid]
map_rxn_set.add(rxn_node['bigg_id'])
cpd_in_map_count = len(map_cpd_set & set(map(lambda x : x.get_seed_id(), fbamodel.metabolites)))
rxn_in_map_count = len(map_rxn_set & set(map(lambda x : x.data['reaction_ref'].split('/')[-1].split('_')[0], fbamodel.reactions)))
gene_in_map_count = 0
return {
'map_cpd' : len(map_cpd_set),
'cpd_in_map_count' : cpd_in_map_count,
'map_rxn' : len(map_rxn_set),
'rxn_in_map_count' : rxn_in_map_count
}
def setup_viewer_data(params, api, data_path):
models = {
'iML1515' : fbamodel1,
'KBase' : fbamodel2,
}
for model_ref in params['model_ids']:
ref = api.get_object_info_from_ref(model_ref)
model_raw = api.get_object(ref.id, ref.workspace_id)
fbamodel = cobrakbase.core.KBaseFBAModel(model_raw)
models[model_ref] = fbamodel
b = cobrakbase.core.converters.KBaseFBAModelToCobraBuilder(fbamodel)
cobra_model = b.build()
for model_id in models:
b = KBaseFBAModelToCobraBuilder(models[model_id])
cobra_model = b.build()
cobra_json = json.loads(cobra.io.to_json(cobra_model))
for m in cobra_json['metabolites']:
m['id'] += '@' + model_id
for r in cobra_json['reactions']:
r['id'] += '@' + model_id
r['metabolites'] = dict(map(lambda x : (x + '@' + model_id, r['metabolites'][x]), r['metabolites']))
with open(data_path + '/map_model/' + model_id + '.json', 'w') as fh:
fh.write(json.dumps(cobra_json))
grid = modelseed_escher.EscherGrid()
em_list = []
map_assembly = []
for grid_block_data in map_assembly:
map_id = grid_block_data['map_id']
cmp_id = grid_block_data['cmp_id']
model_id = grid_block_data['model_id']
fbamodel = models[model_id]
em = escher_seed.get_map('ModelSEED', 'ModelSEED', map_id)
em = adapt_map_to_model(em, cmp_id, model_id, fbamodel)
em_list.append(em)
grid_map = grid.build(em_list, (int(params['grid_x']), int(params['grid_y'])))
map_list = {}
for m in escher_seed.list_maps('ModelSEED'):
model_id, map_id = m.split('.')
map_data = escher_seed.get_map('ModelSEED', model_id, map_id)
map_list[map_id] = {}
for model_id in models:
map_list[map_id][model_id] = {}
map_list[map_id][model_id] = scan_content(map_data, models[model_id], None)
with open(data_path + '/map_list.json', 'w') as fh:
fh.write(json.dumps(map_list))
return models
| 5,965
| 0
| 185
|
2e36b8ccbbb6c24b6057bc58fbc6eff45f8d865c
| 892
|
py
|
Python
|
examples/most_shared.py
|
fossabot/pynytimes
|
96aeb5878f45a3ca9e8528980d3022de2f3c8bc4
|
[
"MIT"
] | 22
|
2020-02-08T22:10:02.000Z
|
2022-01-25T02:07:46.000Z
|
examples/most_shared.py
|
fossabot/pynytimes
|
96aeb5878f45a3ca9e8528980d3022de2f3c8bc4
|
[
"MIT"
] | 6
|
2020-02-21T16:36:42.000Z
|
2022-01-06T04:02:39.000Z
|
examples/most_shared.py
|
fossabot/pynytimes
|
96aeb5878f45a3ca9e8528980d3022de2f3c8bc4
|
[
"MIT"
] | 10
|
2020-03-25T08:44:11.000Z
|
2022-02-11T11:03:37.000Z
|
from pynytimes import NYTAPI
# Make sure to set parse dates to True so that the dates
# are parsed into datetime.datetime or datetime.date objects
nyt = NYTAPI(
key="Your API Key", # Get your API Key at https://developer.nytimes.com
parse_dates=True,
)
# Get most shared articles of today
most_shared = nyt.most_shared()
# Optionally you can also define the timeframe
# Valid options are 1, 7, 30
most_shared_last_week = nyt.most_shared(days=7)
most_shared_last_month = nyt.most_shared(days=30)
# You can also define the method of sharing.
# Options are: email (default) or facebook.
most_shared_email = nyt.most_shared(method="email")
most_shared_facebook = nyt.most_shared(method="facebook")
# These options can also be mixed and matched
# So the most shared articles of last month on facebook are
most_shared_last_month_facebook = nyt.most_shared(days=30, method="facebook")
| 34.307692
| 77
| 0.773543
|
from pynytimes import NYTAPI
# Make sure to set parse dates to True so that the dates
# are parsed into datetime.datetime or datetime.date objects
nyt = NYTAPI(
key="Your API Key", # Get your API Key at https://developer.nytimes.com
parse_dates=True,
)
# Get most shared articles of today
most_shared = nyt.most_shared()
# Optionally you can also define the timeframe
# Valid options are 1, 7, 30
most_shared_last_week = nyt.most_shared(days=7)
most_shared_last_month = nyt.most_shared(days=30)
# You can also define the method of sharing.
# Options are: email (default) or facebook.
most_shared_email = nyt.most_shared(method="email")
most_shared_facebook = nyt.most_shared(method="facebook")
# These options can also be mixed and matched
# So the most shared articles of last month on facebook are
most_shared_last_month_facebook = nyt.most_shared(days=30, method="facebook")
| 0
| 0
| 0
|
ddd6dac3f6edac94995420fbf926471fcc3bdbf8
| 998
|
py
|
Python
|
getStockRecommendations.py
|
armstjc/stock_self_deployment
|
61abdeca948542679b9bdb7956125d255b828790
|
[
"MIT"
] | null | null | null |
getStockRecommendations.py
|
armstjc/stock_self_deployment
|
61abdeca948542679b9bdb7956125d255b828790
|
[
"MIT"
] | null | null | null |
getStockRecommendations.py
|
armstjc/stock_self_deployment
|
61abdeca948542679b9bdb7956125d255b828790
|
[
"MIT"
] | null | null | null |
import pandas as pd
import yfinance as yf
#from tqdm import tqdm
from RefreshStockAbv import getStockList
from datetime import datetime
stockList = pd.read_csv('Stock_List.csv')
stockListLen = len(stockList)
if __name__ == "__main__":
main()
| 24.95
| 97
| 0.628257
|
import pandas as pd
import yfinance as yf
#from tqdm import tqdm
from RefreshStockAbv import getStockList
from datetime import datetime
stockList = pd.read_csv('Stock_List.csv')
stockListLen = len(stockList)
def getStockRecommendations():
print('')
arr = stockList['Symbol'].to_numpy()
start = datetime.now()
arr_len = len(arr)
arr_count = 0
for i in arr.T:
stock = yf.Ticker(i)
stockRecommendations = stock.recommendations
try:
stockRecommendations['ABV'] = i
stockRecommendations.to_csv('Data/StockRecommendations/'+ i + '_recommendations.csv')
except:
pass
now = datetime.now()
        duration = now - start
        p_d = str(duration)
#print(p_d)
arr_count = arr_count +1
print(f'{p_d} getStockRecommendations {arr_count}/{arr_len}: {i}')
def main():
print('starting up')
getStockRecommendations()
if __name__ == "__main__":
main()
| 704
| 0
| 46
|
9c1dd96ad54821d861f87d1cbea67c4e7ab318bb
| 961
|
py
|
Python
|
django-celery-progress-sample/data/models.py
|
radugaf/converter
|
ff6339e602d9dd4e9edf3f21cd485062c8dafd96
|
[
"MIT"
] | null | null | null |
django-celery-progress-sample/data/models.py
|
radugaf/converter
|
ff6339e602d9dd4e9edf3f21cd485062c8dafd96
|
[
"MIT"
] | null | null | null |
django-celery-progress-sample/data/models.py
|
radugaf/converter
|
ff6339e602d9dd4e9edf3f21cd485062c8dafd96
|
[
"MIT"
] | null | null | null |
from django.db import models
from multiselectfield import MultiSelectField
# Create your models here.
PRODUCT_NAME = (
("Product-A", "Product A"),
("Product-B", "Product B"),
("Product-C", "Product C"),
)
| 25.972973
| 81
| 0.644121
|
from django.db import models
from multiselectfield import MultiSelectField
# Create your models here.
PRODUCT_NAME = (
("Product-A", "Product A"),
("Product-B", "Product B"),
("Product-C", "Product C"),
)
class FileInfo(models.Model):
file = models.FileField()
status = models.CharField(
max_length=250,
choices=(
('PENDING', 'PENDING'),
('PROCESSING', 'PROCESSING'),
('PROCESSED', 'PROCESSED'),
),
default='PENDING'
)
class Data(models.Model):
file_info = models.ForeignKey(FileInfo, on_delete=models.SET_NULL, null=True)
title = models.CharField(max_length=100, blank=True, null=True)
delivery_date = models.DateField()
quantity = models.IntegerField(default=1)
# Helpers
created_date = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
class Meta:
ordering = ["-created_date"]
| 0
| 695
| 46
|
24f89f9fcfd6e5f2a71560ee2214cab1b66e740b
| 7,262
|
py
|
Python
|
run_train.py
|
mr4msm/ai_edge_seg
|
0917d58603eae0290e41ccfe307f25cb8180e57c
|
[
"MIT"
] | null | null | null |
run_train.py
|
mr4msm/ai_edge_seg
|
0917d58603eae0290e41ccfe307f25cb8180e57c
|
[
"MIT"
] | null | null | null |
run_train.py
|
mr4msm/ai_edge_seg
|
0917d58603eae0290e41ccfe307f25cb8180e57c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import multiprocessing as mp
import os
import sys
from argparse import ArgumentParser
from chainer import optimizers as optims
from chainer import serializers as S
from chainer.optimizer import GradientClipping, WeightDecay
from batch_generator import BatchGenerator
from loss import Loss
from misc import argv2string
from model import Model
from train import train
if __name__ == '__main__':
main()
| 33.009091
| 79
| 0.610025
|
# -*- coding: utf-8 -*-
import multiprocessing as mp
import os
import sys
from argparse import ArgumentParser
from chainer import optimizers as optims
from chainer import serializers as S
from chainer.optimizer import GradientClipping, WeightDecay
from batch_generator import BatchGenerator
from loss import Loss
from misc import argv2string
from model import Model
from train import train
def main():
parser = ArgumentParser()
parser.add_argument(
'train_data', help='train data'
)
parser.add_argument(
'train_labels', help='train labels'
)
parser.add_argument(
'--val-data', default=None, help='val data'
)
parser.add_argument(
'--val-labels', default=None, help='val labels'
)
parser.add_argument(
'-b', '--batch-size', type=int, default=5,
help='mini-batch size (default=5)'
)
parser.add_argument(
'--beta2', type=float, default=0.999,
help='beta2 of Adam (default=0.999)'
)
parser.add_argument(
'-g', '--gpu-id', type=int, default=-1,
help='GPU ID (default=-1, indicates CPU)'
)
parser.add_argument(
'--ignore-labels', type=int, default=[], nargs='+',
help='labels to ignore (default=[])'
)
parser.add_argument(
'-l', '--learning-rate', type=float, default=0.1,
help='learning rate (default=0.1)'
)
parser.add_argument(
'--max-iter', type=int, default=160000,
help='train model up to max-iter (default=160000)'
)
parser.add_argument(
'--mean-interval', type=int, default=1000,
help='calculate mean of train/loss (and validation loss) ' +
'every mean-interval iters (default=1000)'
)
parser.add_argument(
'--model', default=None,
help='resume to train the model'
)
parser.add_argument(
'--momentum', type=float, default=0.9,
help='momentum rate (default=0.9)'
)
parser.add_argument(
'--n-classes', type=int, default=5,
help='number of classes (default=5)'
)
parser.add_argument(
'--noise', default='no',
help='noise injection method. \'no\', \'patch\', ' +
'and \'permutation\' are available (default=\'no\')'
)
parser.add_argument(
'--optim', default='nesterov',
help='optimization method. \'sgd\', \'nesterov\', ' +
'and \'adam\' are available (default=\'nesterov\')'
)
parser.add_argument(
'-o', '--outdir', default='./',
help='trained models and optimizer states are stored in outdir ' +
'(default=\'./\')'
)
parser.add_argument(
'--queue-maxsize', type=int, default=10,
help='maxsize of queues for training and validation (default=10)'
)
parser.add_argument(
'--save-interval', type=int, default=10000,
help='save model & optimizer every save-interval iters (default=10000)'
)
parser.add_argument(
'--state', default=None,
help='optimizer state. resume to train the model with the optimizer'
)
parser.add_argument(
'-w', '--weight-decay', type=float, default=1e-4,
help='weight decay factor (default=1e-4)'
)
args = parser.parse_args()
print(argv2string(sys.argv) + '\n')
for arg in dir(args):
if arg[:1] == '_':
continue
print('{} = {}'.format(arg, getattr(args, arg)))
print()
if not os.path.isdir(args.outdir):
os.makedirs(args.outdir)
print('mkdir ' + args.outdir + '\n')
model = Model(in_ch=3, out_ch=args.n_classes)
if args.model is not None:
S.load_npz(args.model, model)
loss_func = Loss(model)
if args.optim.lower() in 'sgd':
if args.momentum > 0:
optim = optims.CorrectedMomentumSGD(
lr=args.learning_rate, momentum=args.momentum)
else:
optim = optims.SGD(lr=args.learning_rate)
elif args.optim.lower() in 'nesterovag':
optim = optims.NesterovAG(
lr=args.learning_rate, momentum=args.momentum)
elif args.optim.lower() in 'adam':
optim = optims.Adam(
alpha=args.learning_rate, beta1=args.momentum,
beta2=args.beta2, weight_decay_rate=args.weight_decay,
amsgrad=True)
else:
raise ValueError('Please specify an available optimizer name.\n' +
'SGD, NesterovAG, and Adam are available.')
print('{}\n'.format(type(optim)))
optim.setup(model)
if args.state is not None:
S.load_npz(args.state, optim)
if (args.weight_decay > 0) and not isinstance(optim, optims.Adam):
optim.add_hook(WeightDecay(args.weight_decay))
optim.add_hook(GradientClipping(1))
lr_decay_iter_dict = {int(5 * args.max_iter / 8): 0.1,
int(7 * args.max_iter / 8): 0.1,
}
with open(args.train_data, 'r') as f:
train_data_path_list = [line.strip() for line in f.readlines()]
with open(args.train_labels, 'r') as f:
train_labels_path_list = [line.strip() for line in f.readlines()]
assert len(train_data_path_list) == len(train_labels_path_list)
if (args.val_data is not None) or (args.val_labels is not None):
if (args.val_data is not None) and (args.val_labels is not None):
with open(args.val_data, 'r') as f:
val_data_path_list = [line.strip() for line in f.readlines()]
with open(args.val_labels, 'r') as f:
val_labels_path_list = [line.strip() for line in f.readlines()]
assert len(val_data_path_list) == len(val_labels_path_list)
else:
raise ValueError('Either val_data or val_labels is not specified.')
train_queue = mp.Queue(maxsize=args.queue_maxsize)
train_generator = BatchGenerator(
args.batch_size, train_data_path_list, train_labels_path_list,
train_queue, train=True, noise_injection=args.noise,
out_height=512, out_width=512,
max_height=1216, max_width=1216,
min_height=832, min_width=832)
train_generator.start()
if args.val_data is None:
val_queue = None
else:
val_queue = mp.Queue(maxsize=args.queue_maxsize)
try:
val_generator = BatchGenerator(
1, val_data_path_list, val_labels_path_list, val_queue,
train=False, out_height=608, out_width=968)
val_generator.start()
except Exception:
train_generator.terminate()
train_queue.close()
val_queue.close()
raise
try:
train(loss_func, optim, train_queue, args.max_iter, args.mean_interval,
args.save_interval, val_queue, lr_decay_iter_dict, args.gpu_id,
args.ignore_labels, args.outdir)
except BaseException:
train_generator.terminate()
train_queue.close()
if val_queue is not None:
val_generator.terminate()
val_queue.close()
raise
train_generator.terminate()
train_queue.close()
if val_queue is not None:
val_generator.terminate()
val_queue.close()
if __name__ == '__main__':
main()
| 6,805
| 0
| 23
|
e799cc0ae12f93dcc95fc13743f0d32a689d7492
| 9,606
|
py
|
Python
|
testcases/testcase.py
|
Superalvin1123/wcs-python3-sdk
|
a18fb6194707935e7e9fb28c662a62dcd341cf02
|
[
"MIT"
] | null | null | null |
testcases/testcase.py
|
Superalvin1123/wcs-python3-sdk
|
a18fb6194707935e7e9fb28c662a62dcd341cf02
|
[
"MIT"
] | null | null | null |
testcases/testcase.py
|
Superalvin1123/wcs-python3-sdk
|
a18fb6194707935e7e9fb28c662a62dcd341cf02
|
[
"MIT"
] | 1
|
2022-02-07T09:26:19.000Z
|
2022-02-07T09:26:19.000Z
|
#!/usr/bin/python
# -*-coding:utf-8-*-
import os,sys
import unittest
from os.path import expanduser
sys.path.append('../')
from wcs.commons.config import Config
from wcs.services.client import Client
from wcs.commons.putpolicy import PutPolicy
from wcs.commons.logme import debug
from wcs.commons.util import urlsafe_base64_encode
print (sys.version)
config_file = os.path.join(expanduser("~"), ".wcscfg")
# Direct (simple) file upload
# Stream data upload
# Multipart upload (files with the same name are overwritten)
# Smart upload (multipart upload for files larger than 10 MB; same-name files are overwritten)
# List the bucket
# With mode=0 and an exact prefix match, list files in the bucket
# Bucket status information
# Query file information
# Delete a file
# Move a file
# Copy a file
# Set a file's expiration time
# fmgr: move a file
# fmgr: copy a file
# fmgr: fetch a file
# fmgr: delete a file
# fmgr: delete files by prefix
# fmgr: delete an m3u8 file
# fmgr: query task status
# Audio/video persistence operation
# Query audio/video processing results
# Query statistics
if __name__ == '__main__':
suite = unittest.TestSuite()
suite.addTest(WcsTestCases("test_image_detect"))
runner = unittest.TextTestRunner()
runner.run(suite)
| 34.6787
| 124
| 0.630127
|
#!/usr/bin/python
# -*-coding:utf-8-*-
import os,sys
import unittest
from os.path import expanduser
sys.path.append('../')
from wcs.commons.config import Config
from wcs.services.client import Client
from wcs.commons.putpolicy import PutPolicy
from wcs.commons.logme import debug
from wcs.commons.util import urlsafe_base64_encode
print (sys.version)
config_file = os.path.join(expanduser("~"), ".wcscfg")
class WcsTestCases(unittest.TestCase):
def setUp(self):
self.cfg = Config(config_file)
self.cli = Client(self.cfg)
self.bucket = 'qz-mulitupload-caiyz-test'
self.filepath = 'E:\\157.jpg'
    # Direct (simple) file upload
def test_simple_upload(self):
key = '20180408.jpg'
path = self.filepath
return_data = self.cli.simple_upload(path, self.bucket, key)
debug(return_data)
self.assertEqual(return_data[0],200)
    # Stream data upload
def test_stream_upload(self):
stream = 'http://big-caiyz-fmgr-cache.com/1m.jpg'
key = '1m.jpg'
return_data = self.cli.stream_upload(stream, self.bucket, key)
debug(return_data)
self.assertEqual(return_data[0],200)
    # Multipart upload (files with the same name are overwritten)
def test_multipart_upload(self):
path = 'F:\\5_.zip'
key = '5_.zip'
self.cfg.overwrite = 1
return_data = self.cli.multipart_upload(path, self.bucket, key)
debug(return_data)
self.assertEqual(return_data[0],200)
    # Smart upload (multipart upload for files larger than 10 MB; same-name files are overwritten)
def test_samrt_upload(self):
path = '/root/caiyz/data/14M'
key = '100-2M'
self.cfg.overwrite =1
debug(self.cli.smart_upload(path, self.bucket, key, 10))
    # List the bucket
def test_bucket_list(self):
return_data = self.cli.bucket_list('list-bucket')
debug(return_data)
self.assertEqual(return_data[0],200)
    # With mode=0 and an exact prefix match, list files in the bucket
def test_list_mode_0_prefix_exact(self):
        #print u'Test case starts:\n'
self.bucket_list = 'list-bucket'
return_data = self.cli.bucket_list(self.bucket_list,mode='',prefix='temp_1')
print ("返回结果:{0}".format(return_data))
self.assertEqual(return_data[0],200)
        # Compare the generated result with the expected result
    # Bucket status information
def test_bucket_stat(self):
startdate = '2017-11-10'
enddate = '2017-11-12'
return_data = self.cli.bucket_stat(self.bucket, startdate, enddate)
debug(return_data)
self.assertEqual(return_data[0],200)
    # Query file information
def test_stat(self):
key = '5_.zip'
return_data = self.cli.stat(self.bucket, key)
debug(return_data)
self.assertEqual(return_data[0],200)
    # Delete a file
def test_delete(self):
key ='5_.zip'
return_data = self.cli.delete(self.bucket,key)
debug(return_data)
self.assertEqual(return_data[0],200)
    # Move a file
def test_move(self):
path = 'F:\\5_.zip'
key = '5_.zip'
self.cfg.overwrite = 1
self.cli.multipart_upload(path, self.bucket, key)
srckey = '5_.zip'
dstkey = '5_1.zip'
return_data = self.cli.move(self.bucket, srckey, self.bucket)
debug(return_data)
self.assertEqual(return_data[0],200)
    # Copy a file
def test_copy(self):
path = 'F:\\5_.zip'
key = '5_.zip'
self.cfg.overwrite = 1
self.cli.multipart_upload(path, self.bucket, key)
srckey = '5_.zip'
dstkey = '5_2.zip'
return_data = self.cli.copy(self.bucket, srckey, self.bucket,dstkey)
debug(return_data)
self.assertEqual(return_data[0],200)
    # Set a file's expiration time
def test_setdeadline(self):
path = 'F:\\5_.zip'
key = '5_.zip'
self.cfg.overwrite = 1
self.cli.multipart_upload(path, self.bucket, key)
deadline = '1'
return_data = self.cli.setdeadline(self.bucket, key, deadline)
debug(return_data)
self.assertEqual(return_data[0],200)
    # fmgr: move a file
def test_fmgr_move(self):
path = 'F:\\5_.zip'
key = '5_.zip'
self.cfg.overwrite = 1
self.cli.multipart_upload(path, self.bucket, key)
srckey = key
dstkey = '5_3.zip'
resource = urlsafe_base64_encode('%s:%s' % (self.bucket,srckey))
fops = 'resource/%s/bucket/%s/key/%s' % (resource,urlsafe_base64_encode(self.bucket), urlsafe_base64_encode(dstkey))
return_data = self.cli.fmgr_move(fops)
debug(return_data)
self.assertEqual(return_data[0],200)
    # fmgr: copy a file
def test_fmgr_copy(self):
path = 'F:\\5_.zip'
key = '5_.zip'
self.cfg.overwrite = 1
self.cli.multipart_upload(path, self.bucket, key)
srckey = key
dstkey = '5_4.zip'
resource = urlsafe_base64_encode('%s:%s' % (self.bucket,srckey))
fops = 'resource/%s/bucket/%s/key/%s' % (resource,urlsafe_base64_encode(self.bucket), urlsafe_base64_encode(dstkey))
return_data = self.cli.fmgr_copy(fops)
debug(return_data)
self.assertEqual(return_data[0],200)
    # fmgr: fetch a file
def test_fmgr_fetch(self):
url = 'http://big-caiyz-fmgr-cache.com/1m.jpg'
key = 'fetch_1m.jpg'
fetchurl = urlsafe_base64_encode(url)
enbucket = urlsafe_base64_encode(self.bucket)
enkey = urlsafe_base64_encode(key)
fops = 'fetchURL/%s/bucket/%s/key/%s' % (fetchurl, enbucket, enkey)
return_data = self.cli.fmgr_fetch(fops)
debug(return_data)
self.assertEqual(return_data[0],200)
    # fmgr: delete a file
def test_fmgr_delete(self):
path = 'F:\\5_.zip'
key = '5_.zip'
self.cfg.overwrite = 1
self.cli.multipart_upload(path, self.bucket, key)
enbucket = urlsafe_base64_encode(self.bucket)
enkey = urlsafe_base64_encode(key)
fops = 'bucket/%s/key/%s' % (enbucket, enkey)
return_data = self.cli.fmgr_delete(fops)
debug(return_data)
self.assertEqual(return_data[0],200)
    # fmgr: delete files by prefix
def test_fmgr_prefix_del(self):
path = 'F:\\5_.zip'
key = 'aa/5_.zip'
self.cfg.overwrite = 1
self.cli.multipart_upload(path, self.bucket, key)
prefix = 'aa'
enbucket = urlsafe_base64_encode(self.bucket)
enprefix = urlsafe_base64_encode(prefix)
fops = 'bucket/%s/prefix/%s' % (enbucket, enprefix)
return_data = self.cli.prefix_delete(fops)
debug(return_data)
self.assertEqual(return_data[0],200)
# fmgr: delete an m3u8 file
def test_fmgr_m3u8_del(self):
self.cfg.overwrite = 1
key = 'M3U8_FILE.m3u8'
key_ts = '000001.ts'
path = 'E:\\m3u8\\M3U8_FILE.m3u8'
path_ts = 'E:\\m3u8\\000001.ts'
debug('start to upload m3u8')
self.cli.simple_upload(path, self.bucket, key)
debug('start to upload ts file')
self.cli.simple_upload(path_ts, self.bucket, key_ts)
enbucket = urlsafe_base64_encode(self.bucket)
enkey = urlsafe_base64_encode(key)
fops = 'bucket/%s/key/%s' % (enbucket, enkey)
return_data = self.cli.m3u8_delete(fops)
debug(return_data)
self.assertEqual(return_data[0],200)
# fmgr: query task status
def test_fmgr_stat(self):
path = 'F:\\5_.zip'
key = '5_.zip'
self.cfg.overwrite = 1
self.cli.multipart_upload(path, self.bucket, key)
enbucket = urlsafe_base64_encode(self.bucket)
enkey = urlsafe_base64_encode(key)
fops = 'bucket/%s/key/%s' % (enbucket, enkey)
return_data = self.cli.fmgr_delete(fops)
debug(return_data)
persistentId = return_data[1].get('persistentId')
return_data = self.cli.fmgr_status(persistentId)
debug(return_data)
self.assertEqual(return_data[0],200)
# Audio/video persistent processing
def test_ops(self):
self.cfg.overwrite = 1
key = 'huhu.mp4'
path = 'E:\\huhu.mp4'
debug('start to upload huhu.mp4')
self.cli.simple_upload(path, self.bucket, key)
fops = 'vframe/jpg/offset/10|saveas/cXotbXVsaXR1cGxvYWQtY2FpeXotdGVzdDrop4bpopHmiKrlm74uanBn'
return_data = self.cli.ops_execute(fops,self.bucket,key)
debug(return_data)
self.assertEqual(return_data[0],200)
# Query audio/video processing result
def test_ops_status(self):
self.cfg.overwrite = 1
key = 'huhu.mp4'
path = 'E:\\huhu.mp4'
debug('start to upload huhu.mp4')
self.cli.simple_upload(path, self.bucket, key)
fops = 'vframe/jpg/offset/10|saveas/cXotbXVsaXR1cGxvYWQtY2FpeXotdGVzdDrop4bpopHmiKrlm74uanBn'
return_data = self.cli.ops_execute(fops,self.bucket,key)
persistentId = return_data[1].get('persistentId')
return_data = self.cli.ops_status(persistentId)
debug(return_data)
self.assertEqual(return_data[0],200)
def test_wslive_list(self):
channel = ''
startTime = ''
endTime = ''
debug(self.cli.wslive_list(channel, startTime, endTime,self.bucket))
# Query statistics data
def test_bucket_statistics(self):
return_data = self.cli.bucket_statistics(self.bucket, 'uploadRequest', '2019-12-20', '2019-12-31')
debug(return_data)
self.assertEqual(return_data[0],200)
def test_image_detect(self):
return_data = self.cli.image_detect('http://wcsd.chinanetcenter.com/xdd_15779502234034.png', 'porn', 'doc-pics')
debug(return_data)
self.assertEqual(return_data[0],200)
if __name__ == '__main__':
suite = unittest.TestSuite()
suite.addTest(WcsTestCases("test_image_detect"))
runner = unittest.TextTestRunner()
runner.run(suite)
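# A minimal sketch (not part of the test class above) of the fops-string pattern the
# fmgr tests build: each component is URL-safe base64 encoded and joined into a path.
# Bucket and key names below are hypothetical; urlsafe_base64_encode is the same helper
# the tests above call.
def _example_fmgr_move_fops(bucket='my-bucket', srckey='src.zip', dstkey='dst.zip'):
    resource = urlsafe_base64_encode('%s:%s' % (bucket, srckey))
    return 'resource/%s/bucket/%s/key/%s' % (
        resource, urlsafe_base64_encode(bucket), urlsafe_base64_encode(dstkey))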
| 7,995
| 17
| 676
|
537fe9ec1b4225b677ace32d53e56f0672156914
| 1,854
|
py
|
Python
|
httprunner2jmeter/ext/har2case/__init__.py
|
BSTester/httprunner2jmeter
|
b7f18dbb6b6b36719ce8952239f6d0c1dfc75e4a
|
[
"Apache-2.0"
] | null | null | null |
httprunner2jmeter/ext/har2case/__init__.py
|
BSTester/httprunner2jmeter
|
b7f18dbb6b6b36719ce8952239f6d0c1dfc75e4a
|
[
"Apache-2.0"
] | null | null | null |
httprunner2jmeter/ext/har2case/__init__.py
|
BSTester/httprunner2jmeter
|
b7f18dbb6b6b36719ce8952239f6d0c1dfc75e4a
|
[
"Apache-2.0"
] | null | null | null |
""" Convert HAR (HTTP Archive) to YAML/JSON testcase for HttpRunner.
Usage:
# convert to JSON format testcase
$ hrun har2case demo.har
# convert to YAML format testcase
$ hrun har2case demo.har -2y
"""
from httprunner2jmeter.ext.har2case.core import HarParser
from sentry_sdk import capture_message
def init_har2case_parser(subparsers):
""" HAR converter: parse command line options and run commands.
"""
parser = subparsers.add_parser(
"har2case",
help="Convert HAR(HTTP Archive) to YAML/JSON testcases for HttpRunner.",
)
parser.add_argument("har_source_file", nargs="?", help="Specify HAR source file")
parser.add_argument(
"-2y",
"--to-yml",
"--to-yaml",
dest="to_yaml",
action="store_true",
help="Convert to YAML format, if not specified, convert to pytest format by default.",
)
parser.add_argument(
"-2j",
"--to-json",
dest="to_json",
action="store_true",
help="Convert to JSON format, if not specified, convert to pytest format by default.",
)
parser.add_argument(
"--filter",
help="Specify filter keyword, only url include filter string will be converted.",
)
parser.add_argument(
"--exclude",
help="Specify exclude keyword, url that includes exclude string will be ignored, "
"multiple keywords can be joined with '|'",
)
return parser
| 28.090909
| 94
| 0.651564
|
""" Convert HAR (HTTP Archive) to YAML/JSON testcase for HttpRunner.
Usage:
# convert to JSON format testcase
$ hrun har2case demo.har
# convert to YAML format testcase
$ hrun har2case demo.har -2y
"""
from httprunner2jmeter.ext.har2case.core import HarParser
from sentry_sdk import capture_message
def init_har2case_parser(subparsers):
""" HAR converter: parse command line options and run commands.
"""
parser = subparsers.add_parser(
"har2case",
help="Convert HAR(HTTP Archive) to YAML/JSON testcases for HttpRunner.",
)
parser.add_argument("har_source_file", nargs="?", help="Specify HAR source file")
parser.add_argument(
"-2y",
"--to-yml",
"--to-yaml",
dest="to_yaml",
action="store_true",
help="Convert to YAML format, if not specified, convert to pytest format by default.",
)
parser.add_argument(
"-2j",
"--to-json",
dest="to_json",
action="store_true",
help="Convert to JSON format, if not specified, convert to pytest format by default.",
)
parser.add_argument(
"--filter",
help="Specify filter keyword, only url include filter string will be converted.",
)
parser.add_argument(
"--exclude",
help="Specify exclude keyword, url that includes exclude string will be ignored, "
"multiple keywords can be joined with '|'",
)
return parser
def main_har2case(args):
har_source_file = args.har_source_file
if args.to_yaml:
output_file_type = "YAML"
elif args.to_json:
output_file_type = "JSON"
else:
output_file_type = "pytest"
capture_message(f"har2case {output_file_type}")
HarParser(har_source_file, args.filter, args.exclude).gen_testcase(output_file_type)
return 0
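# A minimal usage sketch (not part of the original module): register the subparser on a
# top-level argparse parser and dispatch to main_har2case. The "hrun" program name and
# the demo.har path are hypothetical.
if __name__ == "__main__":
    import argparse
    top_parser = argparse.ArgumentParser(prog="hrun")
    subparsers = top_parser.add_subparsers(dest="command")
    init_har2case_parser(subparsers)
    cli_args = top_parser.parse_args(["har2case", "demo.har", "-2y"])
    if cli_args.command == "har2case":
        main_har2case(cli_args)  # converts demo.har into a YAML testcase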
| 361
| 0
| 23
|
80b212116a284ff861135dacb00207507749fa8e
| 88
|
py
|
Python
|
qaoalib/qaoa/__init__.py
|
xenoicwyce/qaoalib
|
1c4889548ae94733091b562bab35f6b34c7c4992
|
[
"MIT"
] | null | null | null |
qaoalib/qaoa/__init__.py
|
xenoicwyce/qaoalib
|
1c4889548ae94733091b562bab35f6b34c7c4992
|
[
"MIT"
] | null | null | null |
qaoalib/qaoa/__init__.py
|
xenoicwyce/qaoalib
|
1c4889548ae94733091b562bab35f6b34c7c4992
|
[
"MIT"
] | null | null | null |
from .qmc import Qmc, QmcFastKron
from .layerwise import Layerwise
from .utils import *
| 22
| 33
| 0.795455
|
from .qmc import Qmc, QmcFastKron
from .layerwise import Layerwise
from .utils import *
| 0
| 0
| 0
|
2db77b940ee672a4b9d7248883e87d6c60a6f824
| 15,317
|
py
|
Python
|
tests/beos_plugin_tests/beos_test_utils/beosnode.py
|
terradacs/beos-core
|
31e19170bcad573b1d498811284e62babd478f92
|
[
"MIT"
] | 9
|
2019-04-04T18:46:14.000Z
|
2022-03-03T16:22:56.000Z
|
tests/beos_plugin_tests/beos_test_utils/beosnode.py
|
terradacs/beos-core
|
31e19170bcad573b1d498811284e62babd478f92
|
[
"MIT"
] | null | null | null |
tests/beos_plugin_tests/beos_test_utils/beosnode.py
|
terradacs/beos-core
|
31e19170bcad573b1d498811284e62babd478f92
|
[
"MIT"
] | 3
|
2019-03-19T17:45:08.000Z
|
2021-03-22T21:45:35.000Z
|
import os
import sys
import time
import random
import string
import datetime
import collections
try:
from beos_test_utils import run
except Exception as _ex:
print("Failed to import run.py script. Please make sure that you have run ./deploy.py --build-beos. Aborting.")
exit(1)
import beos_test_utils.beosactionpatterns as patterns
from beos_test_utils.logger import log
from beos_test_utils.eoscleoscaller import EOSCleosCaller
from beos_test_utils.eostransactioncaller import EOSTransactionCaller
from beos_test_utils.summarizer import ActionResult
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from cd_scripts import eosio_rpc_actions
from cd_scripts import eosio_rpc_client
| 47.421053
| 178
| 0.643729
|
import os
import sys
import time
import random
import string
import datetime
import collections
try:
from beos_test_utils import run
except Exception as _ex:
print("Failed to import run.py script. Please make sure that you have run ./deploy.py --build-beos. Aborting.")
exit(1)
import beos_test_utils.beosactionpatterns as patterns
from beos_test_utils.logger import log
from beos_test_utils.eoscleoscaller import EOSCleosCaller
from beos_test_utils.eostransactioncaller import EOSTransactionCaller
from beos_test_utils.summarizer import ActionResult
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from cd_scripts import eosio_rpc_actions
from cd_scripts import eosio_rpc_client
class BEOSNode(object):
node = "node"
class BEOSNodeData(object):
def __init__(self, _node_ip, _node_port, _keosd_ip, _keosd_port, _wallet_name):
self.node_ip = _node_ip
self.node_port = _node_port
self.keosd_ip = _keosd_ip
self.keosd_port = _keosd_port
self.wallet_name = _wallet_name
def __init__(self, _node_ip, _node_port, _keosd_ip, _keosd_port, _wallet_name, _path_to_cleos ):
self.cleos = EOSCleosCaller(_node_ip, _node_port, _keosd_ip, _keosd_port, _wallet_name, _path_to_cleos)
self.utils = EOSTransactionCaller(_node_ip, _node_port, _keosd_ip, _keosd_port, _wallet_name)
self.node_data = BEOSNode.BEOSNodeData(_node_ip, _node_port, _keosd_ip, _keosd_port, _wallet_name)
eosio_rpc_actions.EOSIO = eosio_rpc_client.EosioInterface(self.node_data.node_ip, self.node_data.node_port, self.node_data.keosd_ip, self.node_data.keosd_port)
eosio_rpc_actions.logger.handlers=[]
eosio_rpc_actions.logger = log
self.url = eosio_rpc_client.EosioInterface(self.node_data.node_ip, self.node_data.node_port, self.node_data.keosd_ip, self.node_data.keosd_port)
self.node_is_running = False
self.node_number = int(self.node_data.node_port)
self.node_name = "{0}-{1}".format(BEOSNode.node, self.node_number)
self.log_path = None
self.working_dir = None
self.additiona_prod = {}
self.delay_block = 0
self.user_name = list("aaaaaaaaaaaa")
def generate_user_name(self):
name = list(self.user_name)
self.user_name[0] = chr(ord(self.user_name[0]) + 1)
for i, _ in enumerate(self.user_name):
if ord(self.user_name[i]) > ord('z'):
self.user_name[i] = 'a'
self.user_name[i+1] = chr(ord(self.user_name[i+1]) + 1)
return ''.join(name)
def create_accounts(self, _nr_of_accounts, _init_value = None):
tester = collections.namedtuple("Tester", ('name','akey','okey','init_value'))
accounts = []
init_value = _init_value
if init_value :
value = init_value
else:
value = None
if self.node_is_running:
stop_node = False
else:
self.run_node()
stop_node = True
for _ in range(_nr_of_accounts):
akey = self.utils.create_key()
okey = self.utils.create_key()
name = self.generate_user_name()
self.create_account(name,_activ_key=akey, _owner_key=okey)
if value:
self.issue(_from="beos.gateway", _to=name, _quantity=value, _memo="init_value")
accounts.append(tester(name, akey, okey, value))
if stop_node:
self.stop_node()
return accounts
def create_producers(self, _nr_of_producers, _init_value = None):
producers = self.create_accounts(_nr_of_producers, _init_value)
for producer in producers:
self.add_producer_to_config(producer.name, producer.akey)
if self.node_is_running:
#we need to rerun node to set producers
self.stop_node()
self.run_node()
return producers
def add_producer_to_config(self, _producer, _key):
try:
self.additiona_prod[_producer]=_key
except Exception as _ex:
log.exception("Exception `{0}` occurs while adding producers `{1}`".format(str(_ex), self.node_name))
def set_node_dirs(self, _workdir, _log_path):
try:
self.log_path = _log_path
self.working_dir = _workdir
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
if not os.path.exists(self.log_path):
os.makedirs(self.log_path)
run.clone_nodeos(self.working_dir, self.node_number, self.node_name, None, True)
except Exception as _ex:
log.exception("Exception `{0}` occurs while setting node dirs `{1}`".format(str(_ex), self.node_name))
def run_node(self):
try:
run.clone_nodeos(self.working_dir, self.node_number, self.node_name, self.additiona_prod)
run.run_custom_nodeos(self.node_number, self.node_name, self.working_dir, self.log_path)
self.node_is_running = True
self.start_block_nr = self.utils.get_info()["head_block_num"]
return self.start_block_nr
except Exception as _ex:
log.exception("Exception `{0}` occurs during initialization of node `{1}`".format(str(_ex), self.node_name))
def stop_node(self):
try:
if self.node_is_running:
run.kill_process(self.working_dir+"/run_nodeos_{0}_{1}.pid".format(self.node_number, self.node_name,), "nodeos", self.node_data.node_ip, self.node_data.node_port)
self.node_is_running = False
except Exception as _ex:
log.exception("Exception `{0}` occurs while stopping node `{1}`".format(str(_ex), self.node_name))
#def changeparams(self, _asset, _election_block, _beos_params, _ram_params, _ram_leftover):
def changeparams(self, _newparams):
try:
election_block = {"starting_block_for_initial_witness_election":_newparams["starting_block_for_initial_witness_election"]+ self.start_block_nr}
changeparams_init = patterns.ChangeparamsInitAction( election_block )
self.make_action_call(changeparams_init.make_action())
asset = {"proxy_assets":_newparams["proxy_assets"]}
ram_leftover = {"ram_leftover":_newparams["ram_leftover"]}
beos_params = {"beos":{ "starting_block": _newparams["beos"]["starting_block"] + self.start_block_nr,
"next_block": _newparams["beos"]["next_block"],
"ending_block":_newparams["beos"]["ending_block"] + self.start_block_nr,
"block_interval":_newparams["beos"]["block_interval"] ,
"trustee_reward":_newparams["beos"]["trustee_reward"] } }
ram_params = {"ram":{ "starting_block":_newparams["ram"]["starting_block"] + self.start_block_nr,
"next_block":_newparams["ram"]["next_block"],
"ending_block":_newparams["ram"]["ending_block"] + self.start_block_nr,
"block_interval":_newparams["ram"]["block_interval"] ,
"trustee_reward":_newparams["ram"]["trustee_reward"] } }
changeparams_distro = patterns.ChangeparamsDistributionAction(asset, beos_params, ram_params, ram_leftover)
return self.make_action_call(changeparams_distro.make_action())
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}` action.".format(str(_ex), "changeparams"))
def issue(self, _from, _to, _quantity, _memo, _authorized_by = None):
try:
issue = patterns.IssueAction(_from, _to, _quantity, _memo, _authorized_by=_authorized_by)
return self.make_action_call(issue.make_action())
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}` action.".format(str(_ex), "issue"))
def withdraw(self, _from, _bts_to, _quantity, _memo, _authorized_by = None):
try:
withdraw = patterns.WithdrawAction(_from, _bts_to, _quantity, _memo, _authorized_by=_authorized_by)
return self.make_action_call(withdraw.make_action())
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}` action.".format(str(_ex), "withdraw"))
def buyram(self, _payer, _receiver, _quant, _authorized_by = None):
try:
buyram = patterns.BuyRamAction(_payer, _receiver, _quant, _authorized_by=_authorized_by)
return self.make_action_call(buyram.make_action())
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}` action.".format(str(_ex), "buyram"))
def buyrambytes(self, _payer, _receiver, _bytes, _authorized_by = None):
try:
buyrambytes = patterns.BuyRamBytesAction(_payer, _receiver, _bytes, _authorized_by=_authorized_by)
return self.make_action_call(buyrambytes.make_action())
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}` action.".format(str(_ex), "buyrambytes"))
def delegatebw(self, _from, _receiver, _stake_net_quantity, _stake_cpu_quantity, _transfer, _authorized_by = None):
try:
delegatebw = patterns.DelegatebwAction(_from, _receiver, _stake_net_quantity, _stake_cpu_quantity, _transfer, _authorized_by=_authorized_by)
return self.make_action_call(delegatebw.make_action())
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}` action.".format(str(_ex), "delegatebw"))
def regproducer(self, _producer, _producer_key, _url = "", _location = 0, _authorized_by = None):
try:
regproducer = patterns.RegproducerAction( _producer, _producer_key, _url, _location, _authorized_by=_authorized_by)
return self.make_action_call(regproducer.make_action())
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}` action.".format(str(_ex), "regproducer"))
def unregprod(self, _producer, _authorized_by = None):
try:
unregprod = patterns.UnregprodAction( _producer, _authorized_by=_authorized_by)
return self.make_action_call(unregprod.make_action())
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}` action.".format(str(_ex), "unregprod"))
def sellram(self, _account, _bytes, _authorized_by = None):
try:
sellram = patterns.SellramAction(_account, _bytes, _authorized_by=_authorized_by)
return self.make_action_call(sellram.make_action())
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}` action.".format(str(_ex), "sellram"))
def transfer(self, _from, _to, _quantity, _memo, _authorized_by = None):
try:
transfer = patterns.TransferAction(_from, _to, _quantity, _memo, _authorized_by=_authorized_by)
return self.make_action_call(transfer.make_action())
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}` action.".format(str(_ex), "transfer"))
def undelegatebw(self, _from, _receiver, _unstake_net_quantity, _unstake_cpu_quantity, _authorized_by = None):
try:
undelegatebw = patterns.UndelegatebwAction(_from, _receiver, _unstake_net_quantity, _unstake_cpu_quantity, _authorized_by=_authorized_by)
return self.make_action_call(undelegatebw.make_action())
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}` action.".format(str(_ex), "undelegatebw"))
def voteproducer(self, _voter, _proxy, _producers, _authorized_by = None):
try:
voteproducer = patterns.VoteproducerAction(_voter, _proxy, _producers, _authorized_by=_authorized_by)
return self.make_action_call(voteproducer.make_action())
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}` action.".format(str(_ex), "voteproducer"))
def create_account(self, _name, _activ_key = None, _owner_key = None, _init_ram = True, _authorized_by = None, _creator = None,):
try:
if not _creator:
_creator = "beos.gateway"
if not _activ_key:
_activ_key = self.utils.create_key()
if not _owner_key:
_owner_key = self.utils.create_key()
create_account = patterns.CreateAccountAction(_creator, _name, _owner_key, _activ_key, _init_ram)
return self.make_action_call(create_account.make_action())
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}` action.".format(str(_ex), "create_account"))
def wait_till_block(self, _block):
try:
while self.start_block_nr + _block > int(self.utils.get_info()["head_block_num"]):
time.sleep(0.5)
continue
pass
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}`.".format(str(_ex), "wait_till_block"))
def wait_n_blocks(self, _blocks_to_wait):
try:
start = int(self.utils.get_info()["head_block_num"])
while (start + _blocks_to_wait) > int(self.utils.get_info()["head_block_num"]):
time.sleep(0.5)
continue
pass
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}`.".format(str(_ex), "wait_n_blocks"))
def make_cleos_call(self, _params):
try:
return self.cleos.make_call(_params)
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}`.".format(str(_ex), "make_cleos_call"))
def get_url_caller(self):
try:
return self.url
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}`.".format(str(_ex), "get_url_caller"))
def make_action_call(self, _action):
try:
account = _action["code"]
actor = _action.pop("authorized_by","")
action = {_action["action"]:_action}
delay_block, resp = eosio_rpc_actions.push_action(account,actor,action,"active",True)
if delay_block:
self.delay_block += delay_block
if "transaction_id" in resp:
log.info("[ACTION][OK] `%s` pushed to block `%d (delay %d)`"%(_action, resp["processed"]["block_num"], self.delay_block))
return ActionResult(True, "", resp, _action)
else:
log.error("[ACTION][ERROR] failed to push action `%s` to block because %s"%(_action, resp["error"]["details"][0]["message"]))
return ActionResult(False, resp["error"]["details"], resp, _action)
except Exception as _ex:
log.exception("Exception `{0}` occurs during `{1}`.".format(str(_ex), "make_action_call"))
return ActionResult(False, "", "", _action)
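# A hypothetical usage sketch (not part of the original module): IPs, ports, paths and
# the wallet name below are made up; only methods defined on BEOSNode above are used.
if __name__ == "__main__":
    node = BEOSNode("127.0.0.1", "8888", "127.0.0.1", "8900",
                    "default-wallet", "/usr/local/bin/cleos")
    node.set_node_dirs("/tmp/beos-node", "/tmp/beos-logs")
    node.run_node()
    accounts = node.create_accounts(2, _init_value="10.0000 BEOS")
    result = node.transfer(accounts[0].name, accounts[1].name, "1.0000 BEOS", "test transfer")
    print(result)
    node.stop_node()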
| 13,653
| 910
| 23
|
719709584da1e07e58c9bd58392d0adc1bb2c4ee
| 1,658
|
py
|
Python
|
sct_custom/unit_testing/test_utils.py
|
nidebroux/lumbosacral_segmentation
|
3217960c6f0f5c3886dfdf46e1286ad2f737f4aa
|
[
"Unlicense",
"MIT"
] | 1
|
2021-09-07T08:52:21.000Z
|
2021-09-07T08:52:21.000Z
|
sct_custom/unit_testing/test_utils.py
|
nidebroux/lumbosacral_segmentation
|
3217960c6f0f5c3886dfdf46e1286ad2f737f4aa
|
[
"Unlicense",
"MIT"
] | null | null | null |
sct_custom/unit_testing/test_utils.py
|
nidebroux/lumbosacral_segmentation
|
3217960c6f0f5c3886dfdf46e1286ad2f737f4aa
|
[
"Unlicense",
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8
# pytest unit tests for utils
import pytest
from spinalcordtoolbox import utils
def test_sct_argument_parser(capsys):
"""Test extra argparse functionality added by SCTArgumentParser subclass."""
# Check that new defaults can still be overridden (setting add_help via args AND kwargs)
parser1 = utils.SCTArgumentParser(None, None, None, None, [], utils.SmartFormatter, '-', None, None, 'error', True)
assert parser1.add_help is True
parser2 = utils.SCTArgumentParser(add_help=True)
assert parser2.add_help is True
# Check that new defaults are set properly
parser3 = utils.SCTArgumentParser()
assert parser3.prog == "test_utils"
assert parser3.formatter_class == utils.SmartFormatter
assert parser3.add_help is False
# Check that error is thrown when required argument isn't passed
parser3.add_argument('-r', '--required', required=True)
parser3.add_argument('-h', "--help", help="show this message and exit", action="help")
with pytest.raises(SystemExit) as e:
parser3.parse_args()
assert e.value.code == 2
# Check help message is still output when above error is thrown
captured = capsys.readouterr()
assert "usage: test_utils" in captured.err
# Ensure no error is thrown when help is explicitly called
with pytest.raises(SystemExit) as e:
parser3.parse_args(['-h'])
assert e.value.code == 0
| 36.844444
| 119
| 0.702654
|
#!/usr/bin/env python
# -*- coding: utf-8
# pytest unit tests for utils
import pytest
from spinalcordtoolbox import utils
def test_parse_num_list_inv():
assert utils.parse_num_list_inv([1, 2, 3, 5, 6, 9]) == '1:3;5:6;9'
assert utils.parse_num_list_inv([3, 2, 1, 5]) == '1:3;5'
assert utils.parse_num_list_inv([]) == ''
def test_sct_argument_parser(capsys):
"""Test extra argparse functionality added by SCTArgumentParser subclass."""
# Check that new defaults can still be overridden (setting add_help via args AND kwargs)
parser1 = utils.SCTArgumentParser(None, None, None, None, [], utils.SmartFormatter, '-', None, None, 'error', True)
assert parser1.add_help is True
parser2 = utils.SCTArgumentParser(add_help=True)
assert parser2.add_help is True
# Check that new defaults are set properly
parser3 = utils.SCTArgumentParser()
assert parser3.prog == "test_utils"
assert parser3.formatter_class == utils.SmartFormatter
assert parser3.add_help is False
# Check that error is thrown when required argument isn't passed
parser3.add_argument('-r', '--required', required=True)
parser3.add_argument('-h', "--help", help="show this message and exit", action="help")
with pytest.raises(SystemExit) as e:
parser3.parse_args()
assert e.value.code == 2
# Check help message is still output when above error is thrown
captured = capsys.readouterr()
assert "usage: test_utils" in captured.err
# Ensure no error is thrown when help is explicitly called
with pytest.raises(SystemExit) as e:
parser3.parse_args(['-h'])
assert e.value.code == 0
| 187
| 0
| 23
|
fe045c5343a727ea1cacb5fd6253239d05f5337b
| 5,336
|
py
|
Python
|
functions/metrics.py
|
cainmagi/MDNT
|
4affd8a83698ce6786c04dddacdcf7415f8c5f90
|
[
"MIT"
] | 14
|
2019-09-24T07:33:13.000Z
|
2021-03-04T16:27:29.000Z
|
functions/metrics.py
|
cainmagi/MDNT
|
4affd8a83698ce6786c04dddacdcf7415f8c5f90
|
[
"MIT"
] | 1
|
2020-02-28T04:24:09.000Z
|
2020-03-03T08:55:31.000Z
|
functions/metrics.py
|
cainmagi/MDNT
|
4affd8a83698ce6786c04dddacdcf7415f8c5f90
|
[
"MIT"
] | 6
|
2020-08-24T03:35:41.000Z
|
2021-02-10T08:02:16.000Z
|
'''
################################################################
# Functions - Metrics
# @ Modern Deep Network Toolkits for Tensorflow-Keras
# Yuchen Jin @ cainmagi@gmail.com
# Requirements: (Pay attention to version)
# python 3.6+
# tensorflow r1.13+
# Extend metrics. These functions should not be used as train-
# ing losses.
# Version: 0.10 # 2019/6/13
# Comments:
# Create this submodule, and finish signal_to_noise,
# correlation and jaccard_index.
################################################################
'''
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import control_flow_ops
from .others import get_channels
def signal_to_noise(y_true, y_pred, mode='snr', data_format=None, epsilon=1e-8):
'''Signal-to-noise ratio. (metric)
Calculate the signal-to-noise ratio. It supports different modes.
Arguments:
mode: (1) snr: mean [ y_true^2 / (y_pred - y_true)^2 ]
(2) psnr: mean [ max( y_true^2 ) / (y_pred - y_true)^2 ]
data_format: 'channels_first' or 'channels_last'. The default setting is generally
'channels_last' like other tf.keras APIs.
epsilon: used to avoid zero division.
Input:
y_true: label, tensor in any shape.
y_pred: prediction, tensor in any shape.
Output:
scalar, the mean SNR.
'''
get_reduced_axes = get_channels(y_true, data_format)
if mode.casefold() == 'psnr':
signal = math_ops.reduce_max(gen_math_ops.square(y_true), axis=get_reduced_axes)
else:
signal = math_ops.reduce_sum(gen_math_ops.square(y_true), axis=get_reduced_axes)
noise = math_ops.reduce_sum(gen_math_ops.square(y_true - y_pred), axis=get_reduced_axes) + epsilon
coeff = (10.0/2.3025851) # 10/log_e(10)
return coeff*math_ops.reduce_mean(gen_math_ops.log(math_ops.divide(signal, noise)))
def correlation(y_true, y_pred):
'''Pearson correlation coefficient. (metric)
The linear correlation between y_true and y_pred is between -1.0 and 1.0, indicating
positive correlation and negative correlation respectively. In particular, if the
correlation is 0.0, it means y_true and y_pred are linearly uncorrelated.
This function is implemented by:
corr = [mean(y_true * y_pred) - mean(y_true) * mean(y_pred)]
/ [ std(y_true) * std(y_pred) ]
This function has been revised to prevent division failure (0/0): when either y_true
or y_pred has zero variance, the correlation is set to 0.0.
Input:
y_true: label, tensor in any shape.
y_pred: prediction, tensor in any shape.
Output:
scalar, the mean linear correlation between y_true and y_pred.
'''
m_y_true = math_ops.reduce_mean(y_true, axis=0)
m_y_pred = math_ops.reduce_mean(y_pred, axis=0)
s_y_true = gen_math_ops.sqrt(math_ops.reduce_mean(gen_math_ops.square(y_true), axis=0) - gen_math_ops.square(m_y_true))
s_y_pred = gen_math_ops.sqrt(math_ops.reduce_mean(gen_math_ops.square(y_pred), axis=0) - gen_math_ops.square(m_y_pred))
s_denom = s_y_true * s_y_pred
s_numer = math_ops.reduce_mean(y_true * y_pred, axis=0) - m_y_true * m_y_pred
s_index = gen_math_ops.greater(s_denom, 0)
f1 = lambda: constant_op.constant(0.0)
f2 = lambda: math_ops.reduce_mean(array_ops.boolean_mask(s_numer,s_index)/array_ops.boolean_mask(s_denom,s_index))
return control_flow_ops.case([(math_ops.reduce_any(s_index), f2)], default=f1)
def jaccard_index(y_true, y_pred, data_format=None):
'''Jaccard index, or Intersection over Union (IoU). (metric)
The IoU is generally considered a better measure of accuracy for segmentation.
If both y_true and y_pred are binary, the intersection I(y_true, y_pred) shows the part
where the prediction is correct, while the union U(y_true, y_pred) contains both correct
prediction and wrong prediction. I/U shows the proportion of correct prediction.
Compared to other error functions (like MSE), it is more concentrated on the part where
y_true=1 or y_pred=1.
This function is implemented by:
jacc = logical_and(y_true, y_pred) / logical_or(y_true, y_pred)
Arguments:
data_format: 'channels_first' or 'channels_last'. The default setting is generally
'channels_last' like other tf.keras APIs.
Input:
y_true: label, tensor in any shape, should have at least 3 axes.
y_pred: prediction, tensor in any shape, should have at least 3 axes.
Output:
scalar, the mean Jaccard index between y_true and y_pred over all channels.
'''
get_reduced_axes = get_channels(y_true, data_format)
bin_y_true = gen_math_ops.greater(y_true, 0.5)
bin_y_pred = gen_math_ops.greater(y_pred, 0.5)
valNumer = gen_math_ops.logical_and(bin_y_pred, bin_y_true)
valDomin = gen_math_ops.logical_or(bin_y_pred, bin_y_true)
valNumer = math_ops.reduce_sum(math_ops.cast(valNumer, dtype=y_pred.dtype), axis=get_reduced_axes)
valDomin = math_ops.reduce_sum(math_ops.cast(valDomin, dtype=y_pred.dtype), axis=get_reduced_axes)
return math_ops.reduce_mean(math_ops.div_no_nan(valNumer, valDomin))
| 52.313725
| 123
| 0.701462
|
'''
################################################################
# Functions - Metrics
# @ Modern Deep Network Toolkits for Tensorflow-Keras
# Yuchen Jin @ cainmagi@gmail.com
# Requirements: (Pay attention to version)
# python 3.6+
# tensorflow r1.13+
# Extend metrics. These functions should not be used as train-
# ing losses.
# Version: 0.10 # 2019/6/13
# Comments:
# Create this submodule, and finish signal_to_noise,
# correlation and jaccard_index.
################################################################
'''
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import control_flow_ops
from .others import get_channels
def signal_to_noise(y_true, y_pred, mode='snr', data_format=None, epsilon=1e-8):
'''Signal-to-noise ratio. (metric)
Calculate the signal-to-noise ratio. It supports different modes.
Arguments:
mode: (1) snr: mean [ y_true^2 / (y_pred - y_true)^2 ]
(2) psnr: mean [ max( y_true^2 ) / (y_pred - y_true)^2 ]
data_format: 'channels_first' or 'channels_last'. The default setting is generally
'channels_last' like other tf.keras APIs.
epsilon: used to avoid zero division.
Input:
y_true: label, tensor in any shape.
y_pred: prediction, tensor in any shape.
Output:
scalar, the mean SNR.
'''
get_reduced_axes = get_channels(y_true, data_format)
if mode.casefold() == 'psnr':
signal = math_ops.reduce_max(gen_math_ops.square(y_true), axis=get_reduced_axes)
else:
signal = math_ops.reduce_sum(gen_math_ops.square(y_true), axis=get_reduced_axes)
noise = math_ops.reduce_sum(gen_math_ops.square(y_true - y_pred), axis=get_reduced_axes) + epsilon
coeff = (10.0/2.3025851) # 10/log_e(10)
return coeff*math_ops.reduce_mean(gen_math_ops.log(math_ops.divide(signal, noise)))
def correlation(y_true, y_pred):
'''Pearson correlation coefficient. (metric)
The linear correlation between y_true and y_pred is between -1.0 and 1.0, indicating
positive correlation and negative correlation respectively. In particular, if the
correlation is 0.0, it means y_true and y_pred are linearly uncorrelated.
This function is implemented by:
corr = [mean(y_true * y_pred) - mean(y_true) * mean(y_pred)]
/ [ std(y_true) * std(y_pred) ]
This function has been revised to prevent division failure (0/0): when either y_true
or y_pred has zero variance, the correlation is set to 0.0.
Input:
y_true: label, tensor in any shape.
y_pred: prediction, tensor in any shape.
Output:
scalar, the mean linear correlation between y_true and y_pred.
'''
m_y_true = math_ops.reduce_mean(y_true, axis=0)
m_y_pred = math_ops.reduce_mean(y_pred, axis=0)
s_y_true = gen_math_ops.sqrt(math_ops.reduce_mean(gen_math_ops.square(y_true), axis=0) - gen_math_ops.square(m_y_true))
s_y_pred = gen_math_ops.sqrt(math_ops.reduce_mean(gen_math_ops.square(y_pred), axis=0) - gen_math_ops.square(m_y_pred))
s_denom = s_y_true * s_y_pred
s_numer = math_ops.reduce_mean(y_true * y_pred, axis=0) - m_y_true * m_y_pred
s_index = gen_math_ops.greater(s_denom, 0)
f1 = lambda: constant_op.constant(0.0)
f2 = lambda: math_ops.reduce_mean(array_ops.boolean_mask(s_numer,s_index)/array_ops.boolean_mask(s_denom,s_index))
return control_flow_ops.case([(math_ops.reduce_any(s_index), f2)], default=f1)
def jaccard_index(y_true, y_pred, data_format=None):
'''Jaccard index, or Intersection over Union (IoU). (metric)
The IoU is generally considered a better measure of accuracy for segmentation.
If both y_true and y_pred are binary, the intersection I(y_true, y_pred) shows the part
where the prediction is correct, while the union U(y_true, y_pred) contains both correct
prediction and wrong prediction. I/U shows the proportion of correct prediction.
Compared to other error functions (like MSE), it is more concentrated on the part where
y_true=1 or y_pred=1.
This function is implemented by:
jacc = logical_and(y_true, y_pred) / logical_or(y_true, y_pred)
Arguments:
data_format: 'channels_first' or 'channels_last'. The default setting is generally
'channels_last' like other tf.keras APIs.
Input:
y_true: label, tensor in any shape, should have at least 3 axes.
y_pred: prediction, tensor in any shape, should have at least 3 axes.
Output:
scalar, the mean Jaccard index between y_true and y_pred over all channels.
'''
get_reduced_axes = get_channels(y_true, data_format)
bin_y_true = gen_math_ops.greater(y_true, 0.5)
bin_y_pred = gen_math_ops.greater(y_pred, 0.5)
valNumer = gen_math_ops.logical_and(bin_y_pred, bin_y_true)
valDomin = gen_math_ops.logical_or(bin_y_pred, bin_y_true)
valNumer = math_ops.reduce_sum(math_ops.cast(valNumer, dtype=y_pred.dtype), axis=get_reduced_axes)
valDomin = math_ops.reduce_sum(math_ops.cast(valDomin, dtype=y_pred.dtype), axis=get_reduced_axes)
return math_ops.reduce_mean(math_ops.div_no_nan(valNumer, valDomin))
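# A minimal numerical sketch (not part of the original module) of the Jaccard index
# formula documented above, using NumPy instead of the TensorFlow ops; the two sample
# arrays are made up.
if __name__ == "__main__":
    import numpy as np
    y_true = np.array([[1.0, 1.0, 0.0, 0.0]])
    y_pred = np.array([[1.0, 0.0, 0.0, 1.0]])
    bin_true, bin_pred = y_true > 0.5, y_pred > 0.5
    intersection = np.sum(np.logical_and(bin_true, bin_pred))
    union = np.sum(np.logical_or(bin_true, bin_pred))
    print(intersection / union)  # prints 0.333..., i.e. |I|/|U| = 1/3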
| 0
| 0
| 0
|
e84a278c0eeb538e35f49840e27139eb36c6438e
| 12,236
|
py
|
Python
|
examples/lorenz.py
|
josephbakarji/deep-delay-autoencoder
|
eed23447f930aac140b0f37b888ccab688aa6294
|
[
"MIT"
] | 2
|
2022-03-08T20:26:07.000Z
|
2022-03-28T21:22:00.000Z
|
examples/lorenz.py
|
josephbakarji/deep-delay-autoencoder
|
eed23447f930aac140b0f37b888ccab688aa6294
|
[
"MIT"
] | null | null | null |
examples/lorenz.py
|
josephbakarji/deep-delay-autoencoder
|
eed23447f930aac140b0f37b888ccab688aa6294
|
[
"MIT"
] | 1
|
2022-03-28T21:22:23.000Z
|
2022-03-28T21:22:23.000Z
|
import numpy as np
from scipy.integrate import odeint
from scipy.special import legendre, chebyt
import sys
sys.path.append('../src')
from sindy_utils import library_size
from data_manage import DataStruct
import pdb
#################################
################################
################################
###### FROM example_lorenz_delay.py (NOT FIXED) #########
# def get_lorenz_data_withDelaysAsz(n_training_ics, n_validation_ics, n_test_ics, n_delays):
# t = np.arange(0, 10, .002)
# n_steps = t.size - n_delays
# N = n_delays
# ic_means = np.array([0,0,25])
# ic_widths = 2*np.array([36,48,41])
# d = 3
# noise_strength = 0
# # training data
# ics = ic_widths*(np.random.rand(n_training_ics, 3)-.5) + ic_means
# training_data = generate_lorenz_data(ics, t, N, normalization=np.array([1/40,1/40,1/40]))
# training_data['x'] = training_data['x'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_training_ics,N)
# training_data['dx'] = training_data['dx'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_training_ics,N)
# training_data['ddx'] = training_data['ddx'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_training_ics,N)
# U,s,V = np.linalg.svd(training_data['x'], full_matrices=False)
# training_data['z'] = U[:,0:d]
# # validation data
# ics = ic_widths*(np.random.rand(n_validation_ics, 3)-.5) + ic_means
# validation_data = generate_lorenz_data(ics, t, N, normalization=np.array([1/40,1/40,1/40]))
# validation_data['x'] = validation_data['x'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_validation_ics,N)
# validation_data['dx'] = validation_data['dx'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_validation_ics,N)
# validation_data['ddx'] = validation_data['ddx'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_validation_ics,N)
# validation_data['z'] = (np.dot(validation_data['x'], V.T)/s)[:,0:d]
# # test data
# ics = ic_widths*(np.random.rand(n_test_ics, 3)-.5) + ic_means
# test_data = generate_lorenz_data(ics, t, N, normalization=np.array([1/40,1/40,1/40]))
# test_data['x'] = test_data['x'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_test_ics,N)
# test_data['dx'] = test_data['dx'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_test_ics,N)
# test_data['ddx'] = test_data['ddx'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_test_ics,N)
# test_data['z'] = (np.dot(test_data['x'], V.T)/s)[:,0:d]
# return training_data, validation_data, test_data
| 40.923077
| 143
| 0.538003
|
import numpy as np
from scipy.integrate import odeint
from scipy.special import legendre, chebyt
import sys
sys.path.append('../src')
from sindy_utils import library_size
from data_manage import DataStruct
import pdb
class Lorenz:
def __init__(self,
option='delay',
coefficients=[10, 8/3, 28.],
noise=0.0,
input_dim=128,
normalization=[1/40, 1/40, 1/40],
linear=False,
poly_order=3):
self.option = option
self.sigma = coefficients[0]
self.beta = coefficients[1]
self.rho = coefficients[2]
self.noise = noise
self.input_dim = input_dim
self.normalization = np.array(normalization) if normalization is not None else np.array([1, 1, 1])
self.linear = linear
self.poly_order = poly_order
def get_solution(self, n_ics, tend, dt, ic_means=[0, 0, 25], ic_widths=[36, 48, 41], tau=None):
"""
Generate a set of Lorenz training data for multiple random initial conditions.
Arguments:
n_ics - Integer specifying the number of initial conditions to use.
noise_strength - Amount of noise to add to the data.
Return:
data - Dictionary containing elements of the dataset. See generate_lorenz_data()
doc string for list of contents.
"""
t = np.arange(0, tend, dt)
n_steps = len(t) - self.input_dim
if tau is not None:
n_steps = len(t) - self.input_dim * int(tau/dt)
ic_means = np.array(ic_means)
ic_widths = 2 * np.array(ic_widths)
# training data
ics = ic_widths*(np.random.rand(n_ics, 3)-.5) + ic_means
data = self.generate_data(ics, t, tau=tau)
data.x = data.x.reshape((n_steps*n_ics, self.input_dim)) \
+ self.noise * np.random.randn(n_steps * n_ics, self.input_dim)
data.dx = data.dx.reshape((n_steps*n_ics, self.input_dim)) \
+ self.noise * np.random.randn(n_steps * n_ics, self.input_dim)
data.ddx = data.ddx.reshape((n_steps*n_ics, self.input_dim)) \
+ self.noise * np.random.randn(n_steps * n_ics, self.input_dim)
full_z = data.z[0, :-self.input_dim, :]
full_dz = data.dz[0, :-self.input_dim, :]
full_ddz = data.ddz[0, :-self.input_dim, :]
for i in range(1, data.z.shape[0]):
full_z = np.concatenate((full_z, data.z[i, :-self.input_dim, :]), axis=0)
full_dz = np.concatenate((full_dz, data.dz[i, :-self.input_dim, :]), axis=0)
full_ddz = np.concatenate((full_ddz, data.ddz[i, :-self.input_dim, :]), axis=0)
data.z = full_z
data.dz = full_dz
data.ddz = full_ddz
return data
def simulate_lorenz(self, z0, t):
"""
Simulate the Lorenz dynamics.
Arguments:
z0 - Initial condition in the form of a 3-value list or array.
t - Array of time points at which to simulate.
sigma, beta, rho - Lorenz parameters
Returns:
z, dz, ddz - Arrays of the trajectory values and their 1st and 2nd derivatives.
"""
f = lambda z,t : [self.sigma*(z[1] - z[0]), z[0]*(self.rho - z[2]) - z[1], z[0]*z[1] - self.beta*z[2]]
df = lambda z,dz,t : [self.sigma*(dz[1] - dz[0]),
dz[0]*(self.rho - z[2]) + z[0]*(-dz[2]) - dz[1],
dz[0]*z[1] + z[0]*dz[1] - self.beta*dz[2]]
z = odeint(f, z0, t)
dt = t[1] - t[0]
dz = np.zeros(z.shape)
ddz = np.zeros(z.shape)
for i in range(t.size):
dz[i] = f(z[i],dt*i)
ddz[i] = df(z[i], dz[i], dt*i)
return z, dz, ddz
def generate_data(self, ics, t, tau=None):
"""
Generate high-dimensional Lorenz data set.
Arguments:
ics - Nx3 array of N initial conditions
t - array of time points over which to simulate
n_points - size of the high-dimensional dataset created
linear - Boolean value. If True, high-dimensional dataset is a linear combination
of the Lorenz dynamics. If False, the dataset also includes cubic modes.
normalization - Optional 3-value array for rescaling the 3 Lorenz variables.
sigma, beta, rho - Parameters of the Lorenz dynamics.
Returns:
data - Dictionary containing elements of the dataset. This includes the time points (t),
spatial mapping (y_spatial), high-dimensional modes used to generate the full dataset
(modes), low-dimensional Lorenz dynamics (z, along with 1st and 2nd derivatives dz and
ddz), high-dimensional dataset (x, along with 1st and 2nd derivatives dx and ddx), and
the true Lorenz coefficient matrix for SINDy.
"""
n_ics = ics.shape[0]
n_steps = t.size - self.input_dim # careful consistency
dt = t[1]-t[0]
d = 3
z = np.zeros((n_ics, t.size, d))
dz = np.zeros(z.shape)
ddz = np.zeros(z.shape)
for i in range(n_ics):
z[i], dz[i], ddz[i] = self.simulate_lorenz(ics[i], t)
if self.normalization is not None:
z *= self.normalization
dz *= self.normalization
ddz *= self.normalization
n = self.input_dim
if self.option == 'delay':
n_delays = n
if tau is None:
x = np.zeros((n_ics, n_steps, n_delays))
dx = np.zeros(x.shape)
ddx = np.zeros(x.shape)
for j in range(n):
x[:, :, j] = z[:, j:j+n_steps, 0]
dx[:, :, j] = dz[:, j:j+n_steps, 0]
ddx[:, :, j] = ddz[:, j:j+n_steps, 0]
else:
didx = int(tau/dt)
n_steps = t.size - self.input_dim *didx
x = np.zeros((n_ics, n_steps, n_delays))
dx = np.zeros(x.shape)
ddx = np.zeros(x.shape)
for j in range(n):
jz = j*didx
x[:,:,j] = z[:, jz:jz+n_steps, 0]
dx[:,:,j] = dz[:, jz:jz+n_steps, 0]
ddx[:,:,j] = ddz[:, jz:jz+n_steps, 0]
elif self.option == 'projection':
L = 1
y_spatial = np.linspace(-L, L, n)
modes = np.zeros((2*d, n))
for i in range(2*d):
modes[i] = legendre(i)(y_spatial)
# modes[i] = chebyt(i)(y_spatial)
# modes[i] = np.cos((i+1)*np.pi*y_spatial/2)
x1 = np.zeros((n_ics,n_steps,n))
x2 = np.zeros((n_ics,n_steps,n))
x3 = np.zeros((n_ics,n_steps,n))
x4 = np.zeros((n_ics,n_steps,n))
x5 = np.zeros((n_ics,n_steps,n))
x6 = np.zeros((n_ics,n_steps,n))
x = np.zeros((n_ics,n_steps,n))
dx = np.zeros(x.shape)
ddx = np.zeros(x.shape)
for i in range(n_ics):
for j in range(n_steps):
x1[i,j] = modes[0]*z[i,j,0]
x2[i,j] = modes[1]*z[i,j,1]
x3[i,j] = modes[2]*z[i,j,2]
x4[i,j] = modes[3]*z[i,j,0]**3
x5[i,j] = modes[4]*z[i,j,1]**3
x6[i,j] = modes[5]*z[i,j,2]**3
x[i,j] = x1[i,j] + x2[i,j] + x3[i,j]
if not self.linear:
x[i,j] += x4[i,j] + x5[i,j] + x6[i,j]
dx[i,j] = modes[0]*dz[i,j,0] + modes[1]*dz[i,j,1] + modes[2]*dz[i,j,2]
if not self.linear:
dx[i,j] += modes[3]*3*(z[i,j,0]**2)*dz[i,j,0] + modes[4]*3*(z[i,j,1]**2)*dz[i,j,1] + modes[5]*3*(z[i,j,2]**2)*dz[i,j,2]
ddx[i,j] = modes[0]*ddz[i,j,0] + modes[1]*ddz[i,j,1] + modes[2]*ddz[i,j,2]
if not self.linear:
ddx[i,j] += modes[3]*(6*z[i,j,0]*dz[i,j,0]**2 + 3*(z[i,j,0]**2)*ddz[i,j,0]) \
+ modes[4]*(6*z[i,j,1]*dz[i,j,1]**2 + 3*(z[i,j,1]**2)*ddz[i,j,1]) \
+ modes[5]*(6*z[i,j,2]*dz[i,j,2]**2 + 3*(z[i,j,2]**2)*ddz[i,j,2])
else:
raise Exception('Invalid option')
sindy_coefficients = self.lorenz_coefficients()
# Can be made a object rather than dictionary (part of class)
data = DataStruct(name='full_sim')
data.t = t
data.x = x
data.dx = dx
data.ddx = ddx
data.z = z
data.dz = dz
data.ddz = ddz
data.sindy_coefficients = sindy_coefficients.astype(np.float32)
if self.option == 'projection':
data.y_spatial = y_spatial
data.modes = modes
return data
def lorenz_coefficients(self):
"""
Generate the SINDy coefficient matrix for the Lorenz system.
Arguments:
normalization - 3-element list of array specifying scaling of each Lorenz variable
poly_order - Polynomial order of the SINDy model.
sigma, beta, rho - Parameters of the Lorenz system
"""
Xi = np.zeros((library_size(3, self.poly_order), 3))
Xi[1,0] = -self.sigma
Xi[2,0] = self.sigma*self.normalization[0]/self.normalization[1]
Xi[1,1] = self.rho*self.normalization[1]/self.normalization[0]
Xi[2,1] = -1
Xi[6,1] = -self.normalization[1]/(self.normalization[0]*self.normalization[2])
Xi[3,2] = -self.beta
Xi[5,2] = self.normalization[2]/(self.normalization[0]*self.normalization[1])
return Xi
#################################
################################
################################
###### FROM example_lorenz_delay.py (NOT FIXED) #########
# def get_lorenz_data_withDelaysAsz(n_training_ics, n_validation_ics, n_test_ics, n_delays):
# t = np.arange(0, 10, .002)
# n_steps = t.size - n_delays
# N = n_delays
# ic_means = np.array([0,0,25])
# ic_widths = 2*np.array([36,48,41])
# d = 3
# noise_strength = 0
# # training data
# ics = ic_widths*(np.random.rand(n_training_ics, 3)-.5) + ic_means
# training_data = generate_lorenz_data(ics, t, N, normalization=np.array([1/40,1/40,1/40]))
# training_data['x'] = training_data['x'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_training_ics,N)
# training_data['dx'] = training_data['dx'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_training_ics,N)
# training_data['ddx'] = training_data['ddx'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_training_ics,N)
# U,s,V = np.linalg.svd(training_data['x'], full_matrices=False)
# training_data['z'] = U[:,0:d]
# # validation data
# ics = ic_widths*(np.random.rand(n_validation_ics, 3)-.5) + ic_means
# validation_data = generate_lorenz_data(ics, t, N, normalization=np.array([1/40,1/40,1/40]))
# validation_data['x'] = validation_data['x'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_validation_ics,N)
# validation_data['dx'] = validation_data['dx'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_validation_ics,N)
# validation_data['ddx'] = validation_data['ddx'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_validation_ics,N)
# validation_data['z'] = (np.dot(validation_data['x'], V.T)/s)[:,0:d]
# # test data
# ics = ic_widths*(np.random.rand(n_test_ics, 3)-.5) + ic_means
# test_data = generate_lorenz_data(ics, t, N, normalization=np.array([1/40,1/40,1/40]))
# test_data['x'] = test_data['x'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_test_ics,N)
# test_data['dx'] = test_data['dx'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_test_ics,N)
# test_data['ddx'] = test_data['ddx'].reshape((-1,N)) + noise_strength*np.random.randn(n_steps*n_test_ics,N)
# test_data['z'] = (np.dot(test_data['x'], V.T)/s)[:,0:d]
# return training_data, validation_data, test_data
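# A small standalone sketch (not part of the original module) of the Lorenz system that
# simulate_lorenz integrates above, with the usual chaotic-regime parameters and a
# made-up initial condition; np and odeint are already imported at the top of this file.
if __name__ == "__main__":
    t = np.arange(0, 10, 0.002)
    sigma, beta, rho = 10.0, 8.0 / 3.0, 28.0
    f = lambda z, t: [sigma * (z[1] - z[0]),
                      z[0] * (rho - z[2]) - z[1],
                      z[0] * z[1] - beta * z[2]]
    z = odeint(f, [0.0, 1.0, 25.0], t)
    print(z.shape)  # (5000, 3): 5000 time samples of the 3 Lorenz states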
| 593
| 8,976
| 23
|
43a61b412fd3f3362900d8f61060bf01a5c105a7
| 5,142
|
py
|
Python
|
autoyt/images.py
|
JacobHP/AutoYT
|
da87773bdd2eea1d688a8c003d61d31888dbf3e5
|
[
"MIT"
] | 1
|
2022-02-03T13:21:48.000Z
|
2022-02-03T13:21:48.000Z
|
autoyt/images.py
|
JacobHP/AutoYT
|
da87773bdd2eea1d688a8c003d61d31888dbf3e5
|
[
"MIT"
] | null | null | null |
autoyt/images.py
|
JacobHP/AutoYT
|
da87773bdd2eea1d688a8c003d61d31888dbf3e5
|
[
"MIT"
] | null | null | null |
'''
Author: Jacob Howard-Parker
Functions for writing on images. Text args should be laid out as
specified in data/templates/image_configuration.json
'''
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import textwrap
import re
import pandas as pd
def create_base_image(background=(26,26,27), size=(1920, 1080)):
'''
Create plain image of given size and background color
'''
img = Image.new('RGB', size, background)
return img
def write_on_image(image, text_args, body):
'''
Write the text in the style and position specified in the text_args
on a given image.
Input: image - PIL image, text_args - nested dictionary, body - string
Output: image with string written based on specs in text_args
'''
color = text_args['color']
margin, offset = text_args['position']
font = ImageFont.truetype(text_args['font_loc'], text_args['font_size'])
width = text_args['width']
draw = ImageDraw.Draw(image)
for line in textwrap.wrap(body, width=width):
draw.text((margin, offset), line, color, font=font)
offset += font.getsize(line)[1]
return image
def open_and_write(base, text_args, text):
'''
Open and write on an image.
Input: base - path to base image, text_args - nested dictionary, text - string to write
'''
img = Image.open(base)
img = write_on_image(img, text_args, text)
return img
def write_body_image(image, text_args, text):
'''
Write text onto an image
'''
color = text_args['color']
margin, offset = text_args['position']
font = ImageFont.truetype(text_args['font_loc'], text_args['font_size'])
width = text_args['width']
draw = ImageDraw.Draw(image)
lines=0
text_split = re.split('[\n][\n]+', text)
for text in text_split:
for line in textwrap.wrap(text, width=width):
draw.text((margin, offset), line, color, font=font)
offset += font.getsize(line)[1]
lines+=1
lines+=2 # for new paragraph
offset+=font.getsize(line)[1]
return image, lines
def long_reddit_image(text_args_dict, paragraph_list, author, points,
background, size):
'''
List of images by punctuation split
'''
image_list = []
lines=0
text=''
second = False
author_font = ImageFont.truetype(text_args_dict['author']['font_loc'],
text_args_dict['author']['font_size'])
points_position = (text_args_dict['points']['position'][0]\
+author_font.getsize(author)[0],
text_args_dict['points']['position'][1])
points_args = text_args_dict['points'].copy()
points_args['position'] = points_position
# points depends on author + we want to format it nicely
dot = u"\u00B7"
if points >= 1000:
points = f' {dot} {round(points/1000, 1)}k points'
else:
points = f' {dot} {points} points'
for paragraph in paragraph_list:
for idx in range(len(paragraph)):
# need to add if more than 40 lines etc.
if lines <= 20:
current_text = ' '.join(paragraph[:idx+1])
base_img = create_base_image(background, size)
image = write_on_image(base_img, text_args_dict['author'],
author)
image = write_on_image(image, points_args, points)
image, lines = write_body_image(image, text_args_dict['body'],
text+current_text)
image_list.append(image)
first_idx = idx
elif lines>20:
# start new image with secondary layout
if second==False:
text=''
first_idx = idx
second=True # only want once
current_text = ' '.join(paragraph[first_idx : idx+1])
base_img = create_base_image(background, size)
image, lines = write_body_image(base_img,
text_args_dict['body_second'],
text+current_text)
image_list.append(image)
lines+=20
first_idx=0 # track that its a new para now
text += current_text + ' \n\n'
return image_list
def create_intro_image(author, points, subreddit, body, text_args_dict,
template = False):
'''
Create intro image
'''
if template:
intro = Image.open(template)
else:
intro = create_base_image()
draw = ImageDraw.Draw(intro)
# subreddit
intro = write_on_image(intro, text_args_dict['intro_subreddit'],
'r/'+subreddit)
# author
intro = write_on_image(intro, text_args_dict['intro_author'],
' '+ u"\u00B7" + ' u/'+author)
# body
intro = write_on_image(intro, text_args_dict['intro_body'], body)
return intro
| 32.544304
| 79
| 0.570012
|
'''
Author: Jacob Howard-Parker
Functions for writing on images. Text args should be laid out as
specified in data/templates/image_configuration.json
'''
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import textwrap
import re
import pandas as pd
def create_base_image(background=(26,26,27), size=(1920, 1080)):
'''
Create plain image of given size and background color
'''
img = Image.new('RGB', size, background)
return img
def write_on_image(image, text_args, body):
'''
Write the text in the style and position specified in the text_args
on a given image.
Input: image - PIL image, text_args - nested dictionary, body - string
Output: image with string written based on specs in text_args
'''
color = text_args['color']
margin, offset = text_args['position']
font = ImageFont.truetype(text_args['font_loc'], text_args['font_size'])
width = text_args['width']
draw = ImageDraw.Draw(image)
for line in textwrap.wrap(body, width=width):
draw.text((margin, offset), line, color, font=font)
offset += font.getsize(line)[1]
return image
def open_and_write(base, text_args, text):
'''
Open and write on an image.
Input: base - path to base image, text_args - nested dictionary, text - string to write
'''
img = Image.open(base)
img = write_on_image(img, text_args, text)
return img
def write_body_image(image, text_args, text):
'''
Write text onto an image
'''
color = text_args['color']
margin, offset = text_args['position']
font = ImageFont.truetype(text_args['font_loc'], text_args['font_size'])
width = text_args['width']
draw = ImageDraw.Draw(image)
lines=0
text_split = re.split('[\n][\n]+', text)
for text in text_split:
for line in textwrap.wrap(text, width=width):
draw.text((margin, offset), line, color, font=font)
offset += font.getsize(line)[1]
lines+=1
lines+=2 # for new paragraph
offset+=font.getsize(line)[1]
return image, lines
def long_reddit_image(text_args_dict, paragraph_list, author, points,
background, size):
'''
List of images by punctuation split
'''
image_list = []
lines=0
text=''
second = False
author_font = ImageFont.truetype(text_args_dict['author']['font_loc'],
text_args_dict['author']['font_size'])
points_position = (text_args_dict['points']['position'][0]\
+author_font.getsize(author)[0],
text_args_dict['points']['position'][1])
points_args = text_args_dict['points'].copy()
points_args['position'] = points_position
# points depends on author + we want to format it nicely
dot = u"\u00B7"
if points >= 1000:
points = f' {dot} {round(points/1000, 1)}k points'
else:
points = f' {dot} {points} points'
for paragraph in paragraph_list:
for idx in range(len(paragraph)):
# need to add if more than 40 lines etc.
if lines <= 20:
current_text = ' '.join(paragraph[:idx+1])
base_img = create_base_image(background, size)
image = write_on_image(base_img, text_args_dict['author'],
author)
image = write_on_image(image, points_args, points)
image, lines = write_body_image(image, text_args_dict['body'],
text+current_text)
image_list.append(image)
first_idx = idx
elif lines>20:
# start new image with secondary layout
if second==False:
text=''
first_idx = idx
second=True # only want once
current_text = ' '.join(paragraph[first_idx : idx+1])
base_img = create_base_image(background, size)
image, lines = write_body_image(base_img,
text_args_dict['body_second'],
text+current_text)
image_list.append(image)
lines+=20
first_idx=0 # track that its a new para now
text += current_text + ' \n\n'
return image_list
def create_intro_image(author, points, subreddit, body, text_args_dict,
template = False):
'''
Create intro image
'''
if template:
intro = Image.open(template)
else:
intro = create_base_image()
draw = ImageDraw.Draw(intro)
# subreddit
intro = write_on_image(intro, text_args_dict['intro_subreddit'],
'r/'+subreddit)
# author
intro = write_on_image(intro, text_args_dict['intro_author'],
' '+ u"\u00B7" + ' u/'+author)
# body
intro = write_on_image(intro, text_args_dict['intro_body'], body)
return intro
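# A hypothetical usage sketch (not part of the original module): the font path and the
# text_args layout below are made up, following the keys the functions above read
# (color, position, font_loc, font_size, width).
if __name__ == "__main__":
    text_args = {
        'color': (215, 218, 220),
        'position': (100, 80),
        'font_loc': '/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf',
        'font_size': 40,
        'width': 70,
    }
    img = create_base_image()
    img = write_on_image(img, text_args, 'Example body text rendered onto a base image.')
    img.save('example_frame.png')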
| 0
| 0
| 0
|
229ecd07ca05535989bc2af6b068986d3e701d65
| 6,476
|
py
|
Python
|
tests/db/test_client.py
|
Veritaris/fastapi_contrib
|
081670603917b1b7e9646c75fba5614b09823a3e
|
[
"MIT"
] | 504
|
2019-08-26T18:14:03.000Z
|
2022-03-25T13:49:50.000Z
|
tests/db/test_client.py
|
Veritaris/fastapi_contrib
|
081670603917b1b7e9646c75fba5614b09823a3e
|
[
"MIT"
] | 100
|
2019-08-23T07:52:30.000Z
|
2022-03-20T06:13:10.000Z
|
tests/db/test_client.py
|
identixone/fastapi_contrib
|
e61ecaa3fc114aebb89f4ced28b75bb6dfd21f05
|
[
"MIT"
] | 32
|
2019-10-01T12:46:14.000Z
|
2022-02-01T13:44:53.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from fastapi import FastAPI
from fastapi_contrib.db.client import MongoDBClient
from fastapi_contrib.db.models import MongoDBModel, MongoDBTimeStampedModel
from tests.mock import MongoDBMock
from tests.utils import override_settings, AsyncMock, AsyncIterator
from unittest.mock import patch
app = FastAPI()
app.mongodb = MongoDBMock()
@override_settings(fastapi_app="tests.db.test_client.app")
@override_settings(fastapi_app="tests.db.test_client.app")
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
| 27.913793
| 76
| 0.683138
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from fastapi import FastAPI
from fastapi_contrib.db.client import MongoDBClient
from fastapi_contrib.db.models import MongoDBModel, MongoDBTimeStampedModel
from tests.mock import MongoDBMock
from tests.utils import override_settings, AsyncMock, AsyncIterator
from unittest.mock import patch
app = FastAPI()
app.mongodb = MongoDBMock()
class Model(MongoDBModel):
class Meta:
collection = "collection"
@override_settings(fastapi_app="tests.db.test_client.app")
def test_mongodbclient_is_singleton():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
assert client == MongoDBClient()
@override_settings(fastapi_app="tests.db.test_client.app")
def test_get_collection():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
collection = client.get_collection("collection")
assert collection.name == "collection"
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_insert():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
model = Model(id=1)
insert_result = await client.insert(model)
assert insert_result.inserted_id == model.id
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_count():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
model = Model(id=1)
count = await client.count(model, id=1)
assert count == 1
# Test whether it correctly handles filter by non-id
count = await client.count(model, field="value")
assert count == 1
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_delete():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
model = Model(id=1)
delete_result = await client.delete(model, id=1)
assert delete_result.raw_result == {}
# Test whether it correctly handles filter by non-id
delete_result = await client.delete(model, field="value")
assert delete_result.raw_result == {}
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_update_one():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
model = Model(id=1)
update_result = await client.update_one(
model, filter_kwargs={"id": 1}, id=2
)
assert update_result.raw_result == {}
# Test whether it correctly handles filter by non-id
update_result = await client.update_one(
model, filter_kwargs={"field": "value"}, field="value2"
)
assert update_result.raw_result == {}
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_update_one_params():
with patch(
'fastapi_contrib.db.client.MongoDBClient.update_one',
new_callable=AsyncMock) as mock_update:
class Model(MongoDBTimeStampedModel):
class Meta:
collection = "collection"
client = MongoDBClient()
model = Model()
await model.update_one(
filter_kwargs={"id": 1}, kwargs={'$set': {'bla': 1}}
)
mock_update.mock.assert_called_with(
client,
Model,
filter_kwargs={'id': 1},
kwargs={'$set': {'bla': 1}}
)
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_update_many_params():
with patch(
'fastapi_contrib.db.client.MongoDBClient.update_many',
new_callable=AsyncMock) as mock_update:
class Model(MongoDBTimeStampedModel):
class Meta:
collection = "collection"
client = MongoDBClient()
model = Model()
await model.update_many(
filter_kwargs={"id": 1}, kwargs={'$set': {'bla': 1}}
)
mock_update.mock.assert_called_with(
client,
Model,
filter_kwargs={'id': 1}, kwargs={'$set': {'bla': 1}}
)
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_update_many():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
model = Model(id=1)
update_result = await client.update_many(
model, filter_kwargs={"id": 1}, id=2
)
assert update_result.raw_result == {}
# Test whether it correctly handles filter by non-id
update_result = await client.update_many(
model, filter_kwargs={"field": "value"}, field="value2"
)
assert update_result.raw_result == {}
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_get():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
model = Model(id=1)
_dict = await client.get(model, id=1)
assert _dict == {"_id": 1}
# Test whether it correctly handles filter by non-id
_dict = await client.get(model, field="value")
assert _dict == {"_id": 1}
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_list():
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
client = MongoDBClient()
model = Model(id=1)
cursor = client.list(model, id=1)
assert cursor
# Test whether it correctly handles filter by non-id
_dict = client.list(model, field="value")
assert _dict
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_client.app")
async def test_list_with_sort():
with patch('fastapi_contrib.db.client.MongoDBClient.list') as mock_list:
mock_list.return_value = AsyncIterator([])
class Model(MongoDBTimeStampedModel):
class Meta:
collection = "collection"
model = Model()
await model.list(model, _limit=0, _offset=0, _sort=[('i', -1)])
mock_list.assert_called_with(
Model, _limit=0, _offset=0, _sort=[('i', -1)]
)
await model.list(model)
mock_list.assert_called_with(Model, _limit=0, _offset=0, _sort=None)
| 4,793
| 55
| 287
|
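The tests above reset both MongoDBClient.__instance and MongoDBClient._MongoDBClient__instance before building a client; a small sketch (an assumption about the pattern, not fastapi_contrib's actual code) of why only the name-mangled attribute really clears a cached singleton:

class CachedClient:
    __instance = None          # stored on the class as _CachedClient__instance

    def __new__(cls):
        # inside the class body, __instance is mangled to _CachedClient__instance
        if cls.__instance is None:
            cls.__instance = super().__new__(cls)
        return cls.__instance

first = CachedClient()
assert CachedClient() is first                 # the instance is cached

CachedClient.__instance = None                 # no mangling outside the class body:
                                               # this only adds a separate attribute
assert CachedClient() is first                 # cache is still intact

CachedClient._CachedClient__instance = None    # the mangled name is the real reset
assert CachedClient() is not first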
ba827c49e3526517c097653d7d372a40097b7233
| 3,402
|
py
|
Python
|
demo.py
|
MostafaGazar/temperature_scaling
|
a474b23f6958be9629b70341a85f5b53fcfb0ec8
|
[
"MIT"
] | 724
|
2017-08-03T14:35:06.000Z
|
2022-03-30T20:58:39.000Z
|
demo.py
|
MostafaGazar/temperature_scaling
|
a474b23f6958be9629b70341a85f5b53fcfb0ec8
|
[
"MIT"
] | 28
|
2017-08-04T15:01:04.000Z
|
2022-03-16T22:44:11.000Z
|
demo.py
|
MostafaGazar/temperature_scaling
|
a474b23f6958be9629b70341a85f5b53fcfb0ec8
|
[
"MIT"
] | 121
|
2017-12-08T02:19:35.000Z
|
2022-03-09T07:37:39.000Z
|
import fire
import os
import torch
import torchvision as tv
from torch.utils.data.sampler import SubsetRandomSampler
from models import DenseNet
from temperature_scaling import ModelWithTemperature
def demo(data, save, depth=40, growth_rate=12, batch_size=256):
"""
Applies temperature scaling to a trained model.
Takes a pretrained DenseNet-CIFAR100 model, and a validation set
(parameterized by indices on train set).
Applies temperature scaling, and saves a temperature scaled version.
NB: the "save" parameter references a DIRECTORY, not a file.
In that directory, there should be two files:
- model.pth (model state dict)
- valid_indices.pth (a list of indices corresponding to the validation set).
data (str) - path to directory where data should be loaded from/downloaded
save (str) - directory with necessary files (see above)
"""
# Load model state dict
model_filename = os.path.join(save, 'model.pth')
if not os.path.exists(model_filename):
raise RuntimeError('Cannot find file %s to load' % model_filename)
state_dict = torch.load(model_filename)
# Load validation indices
valid_indices_filename = os.path.join(save, 'valid_indices.pth')
if not os.path.exists(valid_indices_filename):
raise RuntimeError('Cannot find file %s to load' % valid_indices_filename)
valid_indices = torch.load(valid_indices_filename)
# Regenerate validation set loader
mean = [0.5071, 0.4867, 0.4408]
stdv = [0.2675, 0.2565, 0.2761]
test_transforms = tv.transforms.Compose([
tv.transforms.ToTensor(),
tv.transforms.Normalize(mean=mean, std=stdv),
])
valid_set = tv.datasets.CIFAR100(data, train=True, transform=test_transforms, download=True)
valid_loader = torch.utils.data.DataLoader(valid_set, pin_memory=True, batch_size=batch_size,
sampler=SubsetRandomSampler(valid_indices))
# Load original model
if (depth - 4) % 3:
raise Exception('Invalid depth')
block_config = [(depth - 4) // 6 for _ in range(3)]
orig_model = DenseNet(
growth_rate=growth_rate,
block_config=block_config,
num_classes=100
).cuda()
orig_model.load_state_dict(state_dict)
# Now we're going to wrap the model with a decorator that adds temperature scaling
model = ModelWithTemperature(orig_model)
# Tune the model temperature, and save the results
model.set_temperature(valid_loader)
model_filename = os.path.join(save, 'model_with_temperature.pth')
torch.save(model.state_dict(), model_filename)
    print('Temperature scaled model saved to %s' % model_filename)
print('Done!')
if __name__ == '__main__':
"""
Applies temperature scaling to a trained model.
Takes a pretrained DenseNet-CIFAR100 model, and a validation set
(parameterized by indices on train set).
Applies temperature scaling, and saves a temperature scaled version.
NB: the "save" parameter references a DIRECTORY, not a file.
In that directory, there should be two files:
- model.pth (model state dict)
- valid_indices.pth (a list of indices corresponding to the validation set).
--data (str) - path to directory where data should be loaded from/downloaded
--save (str) - directory with necessary files (see above)
"""
fire.Fire(demo)
| 38.659091
| 97
| 0.708113
|
import fire
import os
import torch
import torchvision as tv
from torch.utils.data.sampler import SubsetRandomSampler
from models import DenseNet
from temperature_scaling import ModelWithTemperature
def demo(data, save, depth=40, growth_rate=12, batch_size=256):
"""
Applies temperature scaling to a trained model.
Takes a pretrained DenseNet-CIFAR100 model, and a validation set
(parameterized by indices on train set).
Applies temperature scaling, and saves a temperature scaled version.
NB: the "save" parameter references a DIRECTORY, not a file.
In that directory, there should be two files:
- model.pth (model state dict)
- valid_indices.pth (a list of indices corresponding to the validation set).
data (str) - path to directory where data should be loaded from/downloaded
save (str) - directory with necessary files (see above)
"""
# Load model state dict
model_filename = os.path.join(save, 'model.pth')
if not os.path.exists(model_filename):
raise RuntimeError('Cannot find file %s to load' % model_filename)
state_dict = torch.load(model_filename)
# Load validation indices
valid_indices_filename = os.path.join(save, 'valid_indices.pth')
if not os.path.exists(valid_indices_filename):
raise RuntimeError('Cannot find file %s to load' % valid_indices_filename)
valid_indices = torch.load(valid_indices_filename)
# Regenerate validation set loader
mean = [0.5071, 0.4867, 0.4408]
stdv = [0.2675, 0.2565, 0.2761]
test_transforms = tv.transforms.Compose([
tv.transforms.ToTensor(),
tv.transforms.Normalize(mean=mean, std=stdv),
])
valid_set = tv.datasets.CIFAR100(data, train=True, transform=test_transforms, download=True)
valid_loader = torch.utils.data.DataLoader(valid_set, pin_memory=True, batch_size=batch_size,
sampler=SubsetRandomSampler(valid_indices))
# Load original model
if (depth - 4) % 3:
raise Exception('Invalid depth')
block_config = [(depth - 4) // 6 for _ in range(3)]
orig_model = DenseNet(
growth_rate=growth_rate,
block_config=block_config,
num_classes=100
).cuda()
orig_model.load_state_dict(state_dict)
# Now we're going to wrap the model with a decorator that adds temperature scaling
model = ModelWithTemperature(orig_model)
# Tune the model temperature, and save the results
model.set_temperature(valid_loader)
model_filename = os.path.join(save, 'model_with_temperature.pth')
torch.save(model.state_dict(), model_filename)
    print('Temperature scaled model saved to %s' % model_filename)
print('Done!')
if __name__ == '__main__':
"""
Applies temperature scaling to a trained model.
Takes a pretrained DenseNet-CIFAR100 model, and a validation set
(parameterized by indices on train set).
Applies temperature scaling, and saves a temperature scaled version.
NB: the "save" parameter references a DIRECTORY, not a file.
In that directory, there should be two files:
- model.pth (model state dict)
- valid_indices.pth (a list of indices corresponding to the validation set).
--data (str) - path to directory where data should be loaded from/downloaded
--save (str) - directory with necessary files (see above)
"""
fire.Fire(demo)
| 0
| 0
| 0
|
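For context on what the wrapped model above does, a compact sketch of the standard temperature-scaling recipe (a single learned scalar divides the logits, tuned by LBFGS to minimise NLL on the validation loader). This illustrates the technique; it is not the repository's actual ModelWithTemperature class:

import torch
import torch.nn as nn

class TemperatureWrapper(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.temperature = nn.Parameter(torch.ones(1) * 1.5)

    def forward(self, x):
        # softmax(logits / T) with T > 1 softens over-confident predictions
        return self.model(x) / self.temperature

    def set_temperature(self, valid_loader):
        nll = nn.CrossEntropyLoss()
        logits, labels = [], []
        with torch.no_grad():
            for x, y in valid_loader:
                logits.append(self.model(x))
                labels.append(y)
        logits, labels = torch.cat(logits), torch.cat(labels)
        optimizer = torch.optim.LBFGS([self.temperature], lr=0.01, max_iter=50)

        def closure():
            optimizer.zero_grad()
            loss = nll(logits / self.temperature, labels)
            loss.backward()
            return loss

        optimizer.step(closure)
        return self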
f9758037fff13223db9d17907e8825f9598c281c
| 2,269
|
py
|
Python
|
Spy/SpyPipe/SpyPipeServer.py
|
Cesare-TT/Svt_py_vif
|
98f05857c65ea13b065d41119ffb2e256e237be1
|
[
"Apache-2.0"
] | 2
|
2021-04-08T12:51:06.000Z
|
2021-04-24T10:08:19.000Z
|
Spy/SpyPipe/SpyPipeServer.py
|
Cesare-TT/svt_py_vif
|
98f05857c65ea13b065d41119ffb2e256e237be1
|
[
"Apache-2.0"
] | null | null | null |
Spy/SpyPipe/SpyPipeServer.py
|
Cesare-TT/svt_py_vif
|
98f05857c65ea13b065d41119ffb2e256e237be1
|
[
"Apache-2.0"
] | null | null | null |
import time
from concurrent import futures
from multiprocessing import Queue,Process
from threading import Event
import grpc
from . import SpyPipeGRPC_pb2 as proto_pb2
from . import SpyPipeGRPC_pb2_grpc as proto_pb2_grpc
#def stop_process(self):
# print('run stop process')
if __name__ == '__main__':
# server = serve()
# server.stop(grace=None)
print('python server started.')
import sys
server = SpyPipeServer('test')
server.start()
#server.stop()
# def serve():
# queue_rpc2storage = Queue(100)
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
# proto_pb2_grpc.add_RemoteStorageServicer_to_server(RemoteStorage(queue_rpc2storage), server)
# server.add_insecure_port('[::]:50051')
# server.start()
# # try:
# # while True:
# # time.sleep(60*60*24) # one day in seconds
# # except KeyboardInterrupt:
# # server.stop(0)
# return server
| 28.3625
| 101
| 0.639048
|
import time
from concurrent import futures
from multiprocessing import Queue,Process
from threading import Event
import grpc
from . import SpyPipeGRPC_pb2 as proto_pb2
from . import SpyPipeGRPC_pb2_grpc as proto_pb2_grpc
class RemoteStorage(proto_pb2_grpc.SpyPipeGRPCServicer):
def __init__(self,stop_event):
self.stop_event = stop_event
def SendData(self,request_iterator,context):
req_list = []
while 1:
req = next(request_iterator)
print(req)
req_list.append(req)
if req.control =="end":
#time.sleep(5)
self.stop_event.set()
#print(req_list)
#self.main_Server.stop_process()
#self.main_Server.stop_event.set()
return proto_pb2.ReceivedCount(control=req.control)
class SpyPipeServer(Process):
def __init__(self,name):
super().__init__()
self.name = name
def run(self):
stop_event = Event()
#queue_rpc2storage = Queue(100)
self.grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
proto_pb2_grpc.add_SpyPipeGRPCServicer_to_server(RemoteStorage(stop_event), self.grpc_server)
self.grpc_server.add_insecure_port('unix:SpyPipe_%s.sock' % self.name)
#self.grpc_server.add_insecure_port('http::./test')
self.grpc_server.start()
stop_event.wait()
self.stop()
#def stop_process(self):
# print('run stop process')
def stop(self):
self.grpc_server.stop(grace=None)
if __name__ == '__main__':
# server = serve()
# server.stop(grace=None)
print('python server started.')
import sys
server = SpyPipeServer('test')
server.start()
#server.stop()
# def serve():
# queue_rpc2storage = Queue(100)
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
# proto_pb2_grpc.add_RemoteStorageServicer_to_server(RemoteStorage(queue_rpc2storage), server)
# server.add_insecure_port('[::]:50051')
# server.start()
# # try:
# # while True:
# # time.sleep(60*60*24) # one day in seconds
# # except KeyboardInterrupt:
# # server.stop(0)
# return server
| 1,067
| 43
| 181
|
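The run() method above waits on a threading Event that the RemoteStorage servicer sets when it sees control == "end"; a stripped-down sketch of that shutdown pattern, with the servicer registration elided because it needs the generated SpyPipeGRPC stubs (the socket name is a placeholder):

from concurrent import futures
from threading import Event
import grpc

stop_event = Event()
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
# a real servicer would be registered here and call stop_event.set()
# once the incoming stream delivers its final message
server.add_insecure_port("unix:SpyPipe_demo.sock")
server.start()

stop_event.set()         # stands in for the servicer signalling completion
stop_event.wait()        # the owning process blocks here instead of sleeping forever
server.stop(grace=None)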
5bec0e0fdcc88704d18da64eca876a9a87d678f6
| 1,434
|
py
|
Python
|
openregister_client/django_compat/fields.py
|
ministryofjustice/openregister-client
|
28e73394d6d814b449fc85d2154e2ce4a53a5125
|
[
"MIT"
] | 2
|
2018-07-28T11:08:18.000Z
|
2018-11-19T12:33:25.000Z
|
openregister_client/django_compat/fields.py
|
ministryofjustice/openregister-client
|
28e73394d6d814b449fc85d2154e2ce4a53a5125
|
[
"MIT"
] | 1
|
2022-01-24T17:20:50.000Z
|
2022-01-24T17:20:50.000Z
|
openregister_client/django_compat/fields.py
|
ministryofjustice/openregister-client
|
28e73394d6d814b449fc85d2154e2ce4a53a5125
|
[
"MIT"
] | 1
|
2021-04-11T06:31:45.000Z
|
2021-04-11T06:31:45.000Z
|
import json
from django.db import models
from .model_factory import RegisterJSONEncoder
# TODO: require register field parameter to enable type coercion; Field needs to be deconstructable
# TODO: base code on django.contrib.postgres.fields.array.ArrayField
# TODO: add form field
| 31.866667
| 103
| 0.635286
|
import json
from django.db import models
from .model_factory import RegisterJSONEncoder
class ListField(models.CharField):
# TODO: require register field parameter to enable type coercion; Field needs to be deconstructable
# TODO: base code on django.contrib.postgres.fields.array.ArrayField
# TODO: add form field
def __init__(self, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 255)
kwargs['blank'] = True
super().__init__(**kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if 'blank' in kwargs:
del kwargs['blank']
if kwargs.get('max_length') == 255:
del kwargs['max_length']
return name, path, args, kwargs
def from_db_value(self, value, expression, connection, context):
if value is None or isinstance(value, list):
return value
if isinstance(value, tuple):
return list(value)
return json.loads(value)
def to_python(self, value):
if value is None or isinstance(value, list):
return value
if isinstance(value, tuple):
return list(value)
return json.loads(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
if isinstance(value, (tuple, list)):
return json.dumps(value, cls=RegisterJSONEncoder)
return value
| 968
| 13
| 158
|
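A hypothetical round-trip for the ListField above, assuming Django is installed and the module is importable at the path shown (openregister_client.django_compat.fields); only the decode path is exercised, the database column itself just holds the JSON text:

import json
from openregister_client.django_compat.fields import ListField

field = ListField()
value = ["England", "Scotland", "Wales"]
stored = json.dumps(value)                  # the JSON form the field keeps in its CharField column
assert field.to_python(stored) == value     # JSON text decoded back into a list
assert field.to_python(value) is value      # lists pass through untouched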
960abd72c3af48754c70915c9583f00167061f89
| 900
|
py
|
Python
|
stubs/ev3_pybricks_v1_0_0/umqtt/simple.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
stubs/ev3_pybricks_v1_0_0/umqtt/simple.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
stubs/ev3_pybricks_v1_0_0/umqtt/simple.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
"""
Module: 'umqtt.simple' on LEGO EV3 v1.0.0
"""
# MCU: sysname=ev3, nodename=ev3, release=('v1.0.0',), version=('0.0.0',), machine=ev3
# Stubber: 1.3.2 - updated
from typing import Any
class MQTTClient:
""""""
class MQTTException(Exception):
""""""
socket = None
struct = None
| 16.071429
| 86
| 0.546667
|
"""
Module: 'umqtt.simple' on LEGO EV3 v1.0.0
"""
# MCU: sysname=ev3, nodename=ev3, release=('v1.0.0',), version=('0.0.0',), machine=ev3
# Stubber: 1.3.2 - updated
from typing import Any
class MQTTClient:
""""""
def _recv_len(self, *argv) -> Any:
pass
def _send_str(self, *argv) -> Any:
pass
def check_msg(self, *argv) -> Any:
pass
def connect(self, *argv) -> Any:
pass
def disconnect(self, *argv) -> Any:
pass
def ping(self, *argv) -> Any:
pass
def publish(self, *argv) -> Any:
pass
def set_callback(self, *argv) -> Any:
pass
def set_last_will(self, *argv) -> Any:
pass
def subscribe(self, *argv) -> Any:
pass
def wait_msg(self, *argv) -> Any:
pass
class MQTTException(Exception):
""""""
def hexlify():
pass
socket = None
struct = None
| 286
| 0
| 320
|
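For reference, a hedged sketch of how MicroPython's umqtt.simple client (the class these stubs describe) is typically driven; the broker address, client id and topics are placeholders:

from umqtt.simple import MQTTClient

def on_message(topic, msg):
    print(topic, msg)

client = MQTTClient("ev3-demo", "broker.example.org")
client.set_callback(on_message)
client.connect()
client.subscribe(b"ev3/commands")
client.publish(b"ev3/status", b"ready")
client.check_msg()       # non-blocking poll; wait_msg() would block instead
client.disconnect()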
285d269052487416104191179ac4d238b0511c2b
| 1,333
|
py
|
Python
|
references/test_code/test_source/scripts/test.py
|
qoopen0815/ADS-AutomaticDartScorer-
|
9c287aa2b2db71ac7e15085d88dff4cf83c3a7c9
|
[
"Apache-2.0"
] | null | null | null |
references/test_code/test_source/scripts/test.py
|
qoopen0815/ADS-AutomaticDartScorer-
|
9c287aa2b2db71ac7e15085d88dff4cf83c3a7c9
|
[
"Apache-2.0"
] | 1
|
2020-09-26T05:55:21.000Z
|
2020-09-26T05:55:21.000Z
|
references/test_code/test_source/scripts/test.py
|
calm0815/ADS-AutomaticDartScorer-
|
9c287aa2b2db71ac7e15085d88dff4cf83c3a7c9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import cv2
# Output the RGB values each time a trackbar value is changed
# Load the image
# img = cv2.imread("../../../../resources/capture_l_plane.png", 1)
img = cv2.imread("../../../../resources/result.png", 1)
# img = cv2.resize(img , (int(img.shape[1]*0.5), int(img.shape[0]*0.5)))
# Make the window resizable
cv2.namedWindow("img", cv2.WINDOW_NORMAL)
# Create the trackbars
cv2.createTrackbar("R_min", "img", 0, 255, changeColor)
cv2.createTrackbar("R_max", "img", 0, 255, changeColor)
cv2.createTrackbar("G_min", "img", 0, 255, changeColor)
cv2.createTrackbar("G_max", "img", 0, 255, changeColor)
cv2.createTrackbar("B_min", "img", 0, 255, changeColor)
cv2.createTrackbar("B_max", "img", 0, 255, changeColor)
# Display the image until "Q" is pressed
while (True):
# cv2.imshow("img", mask_image)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
cv2.destroyAllWindows()
| 35.078947
| 99
| 0.663916
|
# -*- coding: utf-8 -*-
import cv2
# Output the RGB values each time a trackbar value is changed
def changeColor(val):
r_min = cv2.getTrackbarPos("R_min", "img")
r_max = cv2.getTrackbarPos("R_max", "img")
g_min = cv2.getTrackbarPos("G_min", "img")
g_max = cv2.getTrackbarPos("G_max", "img")
b_min = cv2.getTrackbarPos("B_min", "img")
b_max = cv2.getTrackbarPos("B_max", "img")
    mask_image = cv2.inRange(img, (b_min, g_min, r_min), (b_max, g_max, r_max)) # BGR image, so the tuples are also in BGR order
    # Display in the (X) window
cv2.namedWindow("img", cv2.WINDOW_NORMAL)
cv2.imshow("img", mask_image)
# Load the image
# img = cv2.imread("../../../../resources/capture_l_plane.png", 1)
img = cv2.imread("../../../../resources/result.png", 1)
# img = cv2.resize(img , (int(img.shape[1]*0.5), int(img.shape[0]*0.5)))
# Make the window resizable
cv2.namedWindow("img", cv2.WINDOW_NORMAL)
# Create the trackbars
cv2.createTrackbar("R_min", "img", 0, 255, changeColor)
cv2.createTrackbar("R_max", "img", 0, 255, changeColor)
cv2.createTrackbar("G_min", "img", 0, 255, changeColor)
cv2.createTrackbar("G_max", "img", 0, 255, changeColor)
cv2.createTrackbar("B_min", "img", 0, 255, changeColor)
cv2.createTrackbar("B_max", "img", 0, 255, changeColor)
# Display the image until "Q" is pressed
while (True):
# cv2.imshow("img", mask_image)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
cv2.destroyAllWindows()
| 518
| 0
| 22
|
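The trackbar tool above ultimately just drives cv2.inRange; a minimal non-GUI sketch of that masking step, using a synthetic image so it runs without the resource files referenced in the script:

import cv2
import numpy as np

img = np.zeros((100, 100, 3), dtype=np.uint8)
img[25:75, 25:75] = (0, 0, 200)                        # a red square (OpenCV uses BGR order)
mask = cv2.inRange(img, (0, 0, 150), (50, 50, 255))    # keep pixels inside the BGR range
print(cv2.countNonZero(mask))                          # 2500 pixels survive the mask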
ec2992cf74bd952128a373125505ff1e8aab1f87
| 126,215
|
py
|
Python
|
pysnmp-with-texts/DLSW-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/DLSW-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/DLSW-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module DLSW-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DLSW-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:07:01 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
sdlcLSAddress, = mibBuilder.importSymbols("SNA-SDLC-MIB", "sdlcLSAddress")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
Gauge32, MibIdentifier, Counter64, IpAddress, ModuleIdentity, mib_2, TimeTicks, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, Bits, Unsigned32, ObjectIdentity, NotificationType, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "MibIdentifier", "Counter64", "IpAddress", "ModuleIdentity", "mib-2", "TimeTicks", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "Bits", "Unsigned32", "ObjectIdentity", "NotificationType", "iso")
TruthValue, TextualConvention, RowPointer, RowStatus, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "RowPointer", "RowStatus", "DisplayString")
dlsw = ModuleIdentity((1, 3, 6, 1, 2, 1, 46))
if mibBuilder.loadTexts: dlsw.setLastUpdated('9606040900Z')
if mibBuilder.loadTexts: dlsw.setOrganization('AIW DLSw MIB RIGLET and IETF DLSw MIB Working Group')
if mibBuilder.loadTexts: dlsw.setContactInfo('David D. Chen IBM Corporation 800 Park, Highway 54 Research Triangle Park, NC 27709-9990 Tel: 1 919 254 6182 E-mail: dchen@vnet.ibm.com')
if mibBuilder.loadTexts: dlsw.setDescription('This MIB module contains objects to manage Data Link Switches.')
dlswMIB = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1))
dlswDomains = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 2))
null = MibIdentifier((0, 0))
dlswTCPDomain = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 2, 1))
dlswNode = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 1))
dlswTConn = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 2))
dlswInterface = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 3))
dlswDirectory = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 4))
dlswCircuit = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 5))
dlswSdlc = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 6))
dlswNodeVersion = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswNodeVersion.setReference('DLSW: Switch-to-Switch Protocol RFC 1795')
if mibBuilder.loadTexts: dlswNodeVersion.setStatus('current')
if mibBuilder.loadTexts: dlswNodeVersion.setDescription('This value identifies the particular version of the DLSw standard supported by this DLSw. The first octet is a hexadecimal value representing the DLSw standard Version number of this DLSw, and the second is a hexadecimal value representing the DLSw standard Release number. This information is reported in DLSw Capabilities Exchange.')
dlswNodeVendorID = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(3, 3)).setFixedLength(3)).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswNodeVendorID.setReference('DLSW: Switch-to-Switch Protocol RFC 1795')
if mibBuilder.loadTexts: dlswNodeVendorID.setStatus('current')
if mibBuilder.loadTexts: dlswNodeVendorID.setDescription("The value identifies the manufacturer's IEEE-assigned organizationally Unique Identifier (OUI) of this DLSw. This information is reported in DLSw Capabilities Exchange.")
dlswNodeVersionString = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswNodeVersionString.setReference('DLSW: Switch-to-Switch Protocol RFC 1795')
if mibBuilder.loadTexts: dlswNodeVersionString.setStatus('current')
if mibBuilder.loadTexts: dlswNodeVersionString.setDescription('This string gives product-specific information about this DLSw (e.g., product name, code release and fix level). This flows in Capabilities Exchange messages.')
dlswNodeStdPacingSupport = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("adaptiveRcvWindow", 2), ("fixedRcvWindow", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswNodeStdPacingSupport.setStatus('current')
if mibBuilder.loadTexts: dlswNodeStdPacingSupport.setDescription('Circuit pacing, as defined in the DLSw Standard, allows each of the two DLSw nodes on a circuit to control the amount of data the other is permitted to send to them. This object reflects the level of support the DLSw node has for this protocol. (1) means the node has no support for the standard circuit pacing flows; it may use RFC 1434+ methods only, or a proprietary flow control scheme. (2) means the node supports the standard scheme and can vary the window sizes it grants as a data receiver. (3) means the node supports the standard scheme but never varies its receive window size.')
dlswNodeStatus = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("inactive", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswNodeStatus.setStatus('current')
if mibBuilder.loadTexts: dlswNodeStatus.setDescription('The status of the DLSw part of the system. Changing the value from active to inactive causes DLSw to take the following actions - (1) it disconnects all circuits through all DLSw partners, (2) it disconnects all transport connections to all DLSw partners, (3) it disconnects all local DLC connections, and (4) it stops processing all DLC connection set-up traffic. Since these are destructive actions, the user should query the circuit and transport connection tables in advance to understand the effect this action will have. Changing the value from inactive to active causes DLSw to come up in its initial state, i.e., transport connections established and ready to bring up circuits.')
dlswNodeUpTime = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 6), TimeTicks()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswNodeUpTime.setStatus('current')
if mibBuilder.loadTexts: dlswNodeUpTime.setDescription('The amount of time (in hundredths of a second) since the DLSw portion of the system was last re-initialized. That is, if dlswState is in the active state, the time the dlswState entered the active state. It will remain zero if dlswState is in the inactive state.')
dlswNodeVirtualSegmentLFSize = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 7), LFSize().clone('lfs65535')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswNodeVirtualSegmentLFSize.setStatus('current')
if mibBuilder.loadTexts: dlswNodeVirtualSegmentLFSize.setDescription('The largest frame size (including DLC header and info field but not any MAC-level or framing octets) this DLSw can forward on any path through itself. This object can represent any box- level frame size forwarding restriction (e.g., from the use of fixed-size buffers). Some DLSw implementations will have no such restriction. This value will affect the LF size of circuits during circuit creation. The LF size of an existing circuit can be found in the RIF (Routing Information Field).')
dlswNodeResourceNBExclusivity = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 8), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswNodeResourceNBExclusivity.setStatus('current')
if mibBuilder.loadTexts: dlswNodeResourceNBExclusivity.setDescription('The value of true indicates that the NetBIOS Names configured in dlswDirNBTable are the only ones accessible via this DLSw. If a node supports sending run-time capabilities exchange messages, changes to this object should cause that action. It is up to the implementation exactly when to start the run-time capabilities exchange.')
dlswNodeResourceMacExclusivity = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 9), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswNodeResourceMacExclusivity.setStatus('current')
if mibBuilder.loadTexts: dlswNodeResourceMacExclusivity.setDescription('The value of true indicates that the MAC addresses configured in the dlswDirMacTable are the only ones accessible via this DLSw. If a node supports sending run-time capabilities exchange messages, changes to this object should cause that action. It is up to the implementation exactly when to start the run-time capabilities exchange.')
dlswTConnStat = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 2, 1))
dlswTConnStatActiveConnections = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 2, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnStatActiveConnections.setStatus('current')
if mibBuilder.loadTexts: dlswTConnStatActiveConnections.setDescription("The number of transport connections that are not in `disconnected' state.")
dlswTConnStatCloseIdles = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnStatCloseIdles.setStatus('current')
if mibBuilder.loadTexts: dlswTConnStatCloseIdles.setDescription('The number of times transport connections in this node exited the connected state with zero active circuits on the transport connection.')
dlswTConnStatCloseBusys = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnStatCloseBusys.setStatus('current')
if mibBuilder.loadTexts: dlswTConnStatCloseBusys.setDescription('The number of times transport connections in this node exited the connected state with some non-zero number of active circuits on the transport connection. Normally this means the transport connection failed unexpectedly.')
dlswTConnConfigTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 2, 2), )
if mibBuilder.loadTexts: dlswTConnConfigTable.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigTable.setDescription("This table defines the transport connections that will be initiated or accepted by this DLSw. Structure of masks allows wildcard definition for a collection of transport connections by a conceptual row. For a specific transport connection, there may be multiple conceptual rows that match the transport address. The `best' match will be the one that determines the characteristics of the transport connection.")
dlswTConnConfigEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1), ).setIndexNames((0, "DLSW-MIB", "dlswTConnConfigIndex"))
if mibBuilder.loadTexts: dlswTConnConfigEntry.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigEntry.setDescription('Each conceptual row defines a collection of transport connections.')
dlswTConnConfigIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: dlswTConnConfigIndex.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigIndex.setDescription('The index to the conceptual row of the table. Negative numbers are not allowed. There are objects defined that point to conceptual rows of this table with this index value. Zero is used to denote that no corresponding row exists. Index values are assigned by the agent, and should not be reused but should continue to increase in value.')
dlswTConnConfigTDomain = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 2), ObjectIdentifier()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigTDomain.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigTDomain.setDescription('The object identifier which indicates the transport domain of this conceptual row.')
dlswTConnConfigLocalTAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 3), TAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigLocalTAddr.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigLocalTAddr.setDescription('The local transport address for this conceptual row of the transport connection definition.')
dlswTConnConfigRemoteTAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 4), TAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigRemoteTAddr.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigRemoteTAddr.setDescription('The remote transport address. Together with dlswTConnConfigEntryType and dlswTConnConfigGroupDefinition, the object instance of this conceptual row identifies a collection of the transport connections that will be either initiated by this DLSw or initiated by a partner DLSw and accepted by this DLSw.')
dlswTConnConfigLastModifyTime = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 5), TimeTicks()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnConfigLastModifyTime.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigLastModifyTime.setDescription('The time (in hundredths of a second) since the value of any object in this conceptual row except for dlswTConnConfigOpens was last changed. This value may be compared to dlswTConnOperConnectTime to determine whether values in this row are completely valid for a transport connection created using this row definition.')
dlswTConnConfigEntryType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("individual", 1), ("global", 2), ("group", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigEntryType.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigEntryType.setDescription("The object instance signifies the type of entry in the associated conceptual row. The value of `individual' means that the entry applies to a specific partner DLSw node as identified by dlswTConnConfigRemoteTAddr and dlswTConnConfigTDomain. The value of `global' means that the entry applies to all partner DLSw nodes of the TDomain. The value of 'group' means that the entry applies to a specific set of DLSw nodes in the TDomain. Any group definitions are enterprise-specific and are pointed to by dlswTConnConfigGroupDefinition. In the cases of `global' and `group', the value in dlswTConnConfigRemoteTAddr may not have any significance.")
dlswTConnConfigGroupDefinition = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 7), RowPointer()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigGroupDefinition.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigGroupDefinition.setDescription("For conceptual rows of `individual' and `global' as specified in dlswTConnConfigEntryType, the instance of this object is `0.0'. For conceptual rows of `group', the instance points to the specific group definition.")
dlswTConnConfigSetupType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("activePersistent", 2), ("activeOnDemand", 3), ("passive", 4), ("excluded", 5))).clone('passive')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigSetupType.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigSetupType.setDescription('This value of the instance of a conceptual row identifies the behavior of the collection of transport connections that this conceptual row defines. The value of activePersistent, activeOnDemand and passive means this DLSw will accept any transport connections, initiated by partner DLSw nodes, which are defined by this conceptual row. The value of activePersistent means this DLSw will also initiate the transport connections of this conceptual row and retry periodically if necessary. The value of activeOnDemand means this DLSw will initiate a transport connection of this conceptual row, if there is a directory cache hits. The value of other is implementation specific. The value of exclude means that the specified node is not allowed to be a partner to this DLSw node. To take a certain conceptual row definition out of service, a value of notInService for dlswTConnConfigRowStatus should be used.')
dlswTConnConfigSapList = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(16, 16)).setFixedLength(16).clone(hexValue="AA000000000000000000000000000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigSapList.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigSapList.setDescription('The SAP list indicates which SAPs are advertised to the transport connection defined by this conceptual row. Only SAPs with even numbers are represented, in the form of the most significant bit of the first octet representing the SAP 0, the next most significant bit representing the SAP 2, to the least significant bit of the last octet representing the SAP 254. Data link switching is allowed for those SAPs which have one in its corresponding bit, not allowed otherwise. The whole SAP list has to be changed together. Changing the SAP list affects only new circuit establishments and has no effect on established circuits. This list can be used to restrict specific partners from knowing about all the SAPs used by DLSw on all its interfaces (these are represented in dlswIfSapList for each interface). For instance, one may want to run NetBIOS with some partners but not others. If a node supports sending run-time capabilities exchange messages, changes to this object should cause that action. When to start the run-time capabilities exchange is implementation-specific. The DEFVAL below indicates support for SAPs 0, 4, 8, and C.')
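# Illustrative aside (not part of the generated MIB): decoding the SAP list
# format described above. The most significant bit of the first octet stands for
# SAP 0x00 and each later bit for the next even SAP, so the DEFVAL of 'AA'
# followed by fifteen zero octets enables SAPs 0x00, 0x04, 0x08 and 0x0C.
def _decode_sap_list(sap_hex):
    enabled = []
    for i, octet in enumerate(bytes.fromhex(sap_hex)):
        for bit in range(8):
            if octet & (0x80 >> bit):
                enabled.append((i * 8 + bit) * 2)
    return enabled

assert _decode_sap_list("AA" + "00" * 15) == [0x00, 0x04, 0x08, 0x0C]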
dlswTConnConfigAdvertiseMacNB = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 10), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigAdvertiseMacNB.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigAdvertiseMacNB.setDescription('The value of true indicates that any defined local MAC addresses and NetBIOS names will be advertised to a partner node via initial and (if supported) run-time capabilities exchange messages. The DLSw node should send the appropriate exclusivity control vector to accompany each list it sends, or to represent that the node is explicitly configured to have a null list. The value of false indicates that the DLSw node should not send a MAC address list or NetBIOS name list, and should also not send their corresponding exclusivity control vectors.')
dlswTConnConfigInitCirRecvWndw = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(1)).setUnits('SSP messages').setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigInitCirRecvWndw.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigInitCirRecvWndw.setDescription('The initial circuit receive pacing window size, in the unit of SSP messages, to be used for future transport connections activated using this table row. The managed node sends this value as its initial receive pacing window in its initial capabilities exchange message. Changing this value does not affect the initial circuit receive pacing window size of currently active transport connections. If the standard window pacing scheme is not supported, the value is zero. A larger receive window value may be appropriate for partners that are reachable only via physical paths that have longer network delays.')
dlswTConnConfigOpens = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnConfigOpens.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigOpens.setDescription('Number of times transport connections entered connected state according to the definition of this conceptual row.')
dlswTConnConfigRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 13), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigRowStatus.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigRowStatus.setDescription('This object is used by the manager to create or delete the row entry in the dlswTConnConfigTable following the RowStatus textual convention. The value of notInService will be used to take a conceptual row definition out of use.')
dlswTConnOperTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 2, 3), )
if mibBuilder.loadTexts: dlswTConnOperTable.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperTable.setDescription('A list of transport connections. It is optional but desirable for the agent to keep an entry for some period of time after the transport connection is disconnected. This allows the manager to capture additional useful information about the connection, in particular, statistical information and the cause of the disconnection.')
dlswTConnOperEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1), ).setIndexNames((0, "DLSW-MIB", "dlswTConnOperTDomain"), (0, "DLSW-MIB", "dlswTConnOperRemoteTAddr"))
if mibBuilder.loadTexts: dlswTConnOperEntry.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperEntry.setDescription('')
dlswTConnOperTDomain = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 1), ObjectIdentifier())
if mibBuilder.loadTexts: dlswTConnOperTDomain.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperTDomain.setDescription('The object identifier indicates the transport domain of this transport connection.')
dlswTConnOperLocalTAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 2), TAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperLocalTAddr.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperLocalTAddr.setDescription('The local transport address for this transport connection. This value could be different from dlswTConnConfigLocalAddr, if the value of the latter were changed after this transport connection was established.')
dlswTConnOperRemoteTAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 3), TAddress())
if mibBuilder.loadTexts: dlswTConnOperRemoteTAddr.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperRemoteTAddr.setDescription('The remote transport address of this transport connection.')
dlswTConnOperEntryTime = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 4), TimeTicks()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperEntryTime.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperEntryTime.setDescription('The amount of time (in hundredths of a second) since this transport connection conceptual row was created.')
dlswTConnOperConnectTime = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 5), TimeTicks()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperConnectTime.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperConnectTime.setDescription("The amount of time (in hundredths of a second) since this transport connection last entered the 'connected' state. A value of zero means this transport connection has never been established.")
dlswTConnOperState = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("connecting", 1), ("initCapExchange", 2), ("connected", 3), ("quiescing", 4), ("disconnecting", 5), ("disconnected", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswTConnOperState.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperState.setDescription("The state of this transport connection. The transport connection enters `connecting' state when DLSw makes a connection request to the transport layer. Once initial Capabilities Exchange is sent, the transport connection enters `initCapExchange' state. When partner capabilities have been determined and the transport connection is ready for sending CanUReach (CUR) messages, it moves to the `connected' state. When DLSw is in the process of bringing down the connection, it is in the `disconnecting' state. When the transport layer indicates one of its connections is disconnected, the transport connection moves to the `disconnected' state. Whereas all of the values will be returned in response to a management protocol retrieval operation, only two values may be specified in a management protocol set operation: `quiescing' and `disconnecting'. Changing the value to `quiescing' prevents new circuits from being established, and will cause a transport disconnect when the last circuit on the connection goes away. Changing the value to `disconnecting' will force off all circuits immediately and bring the connection to `disconnected' state.")
dlswTConnOperConfigIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperConfigIndex.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperConfigIndex.setDescription('The value of dlswTConnConfigIndex of the dlswTConnConfigEntry that governs the configuration information used by this dlswTConnOperEntry. The manager can therefore normally examine both configured and operational information for this transport connection. This value is zero if the corresponding dlswTConnConfigEntry was deleted after the creation of this dlswTConnOperEntry. If some fields in the former were changed but the conceptual row was not deleted, some configuration information may not be valid for this operational transport connection. The manager can compare dlswTConnOperConnectTime and dlswTConnConfigLastModifyTime to determine if this condition exists.')
dlswTConnOperFlowCntlMode = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("undetermined", 1), ("pacing", 2), ("other", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperFlowCntlMode.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperFlowCntlMode.setDescription('The flow control mechanism in use on this transport connection. This value is undetermined (1) before the mode of flow control can be established on a new transport connection (i.e., after CapEx is sent but before Capex or other SSP control messages have been received). Pacing (2) indicates that the standard RFC 1795 pacing mechanism is in use. Other (3) may be either the RFC 1434+ xBusy mechanism operating to a back-level DLSw, or a vendor-specific flow control method. Whether it is xBusy or not can be inferred from dlswTConnOperPartnerVersion.')
dlswTConnOperPartnerVersion = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 9), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(2, 2), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerVersion.setReference('DLSW: Switch-to-Switch Protocol RFC 1795')
if mibBuilder.loadTexts: dlswTConnOperPartnerVersion.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerVersion.setDescription("This value identifies which version (first octet) and release (second octet) of the DLSw standard is supported by this partner DLSw. This information is obtained from a DLSw capabilities exchange message received from the partner DLSw. A string of zero length is returned before a Capabilities Exchange message is received, or if one is never received. A conceptual row with a dlswTConnOperState of `connected' but a zero length partner version indicates that the partner is a non-standard DLSw partner. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperPartnerVendorID = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 10), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(3, 3), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerVendorID.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerVendorID.setDescription("This value identifies the IEEE-assigned organizationally Unique Identifier (OUI) of the maker of this partner DLSw. This information is obtained from a DLSw capabilities exchange message received from the partner DLSw. A string of zero length is returned before a Capabilities Exchange message is received, or if one is never received. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperPartnerVersionStr = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 253))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerVersionStr.setReference('DLSW: Switch-to-Switch Protocol RFC 1795')
if mibBuilder.loadTexts: dlswTConnOperPartnerVersionStr.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerVersionStr.setDescription("This value identifies the particular product version (e.g., product name, code level, fix level) of this partner DLSw. The format of the actual version string is vendor-specific. This information is obtained from a DLSw capabilities exchange message received from the partner DLSw. A string of zero length is returned before a Capabilities Exchange message is received, if one is never received, or if one is received but it does not contain a version string. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperPartnerInitPacingWndw = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerInitPacingWndw.setReference('DLSW: Switch-to-Switch Protocol RFC 1795')
if mibBuilder.loadTexts: dlswTConnOperPartnerInitPacingWndw.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerInitPacingWndw.setDescription("The value of the partner initial receive pacing window. This is our initial send pacing window for all new circuits on this transport connection, as modified and granted by the first flow control indication the partner sends on each circuit. This information is obtained from a DLSw capabilities exchange message received from the partner DLSw. A value of zero is returned before a Capabilities Exchange message is received, or if one is never received. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperPartnerSapList = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 13), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(16, 16), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerSapList.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerSapList.setDescription("The Supported SAP List received in the capabilities exchange message from the partner DLSw. This list has the same format described for dlswTConnConfigSapList. A string of zero length is returned before a Capabilities Exchange message is received, or if one is never received. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperPartnerNBExcl = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 14), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerNBExcl.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerNBExcl.setDescription("The value of true signifies that the NetBIOS names received from this partner in the NetBIOS name list in its capabilities exchange message are the only NetBIOS names reachable by that partner. `False' indicates that other NetBIOS names may be reachable. `False' should be returned before a Capabilities Exchange message is received, if one is never received, or if one is received without a NB Name Exclusivity CV. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperPartnerMacExcl = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 15), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerMacExcl.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerMacExcl.setDescription("The value of true signifies that the MAC addresses received from this partner in the MAC address list in its capabilities exchange message are the only MAC addresses reachable by that partner. `False' indicates that other MAC addresses may be reachable. `False' should be returned before a Capabilities Exchange message is received, if one is never received, or if one is received without a MAC Address Exclusivity CV. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperPartnerNBInfo = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("partial", 2), ("complete", 3), ("notApplicable", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerNBInfo.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerNBInfo.setDescription("It is up to this DLSw whether to keep either none, some, or all of the NetBIOS name list that was received in the capabilities exchange message sent by this partner DLSw. This object identifies how much information was kept by this DLSw. These names are stored as userConfigured remote entries in dlswDirNBTable. A value of (4), notApplicable, should be returned before a Capabilities Exchange message is received, or if one is never received. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperPartnerMacInfo = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("partial", 2), ("complete", 3), ("notApplicable", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerMacInfo.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerMacInfo.setDescription("It is up to this DLSw whether to keep either none, some, or all of the MAC address list that was received in the capabilities exchange message sent by this partner DLSw. This object identifies how much information was kept by this DLSw. These names are stored as userConfigured remote entries in dlswDirMACTable. A value of (4), notApplicable, should be returned before a Capabilities Exchange message is received, or if one is never received. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperDiscTime = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 18), TimeTicks()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperDiscTime.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperDiscTime.setDescription("The amount of time (in hundredths of a second) since the dlswTConnOperState last entered `disconnected' state.")
dlswTConnOperDiscReason = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("other", 1), ("capExFailed", 2), ("transportLayerDisc", 3), ("operatorCommand", 4), ("lastCircuitDiscd", 5), ("protocolError", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperDiscReason.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperDiscReason.setDescription('This object signifies the reason that either prevented the transport connection from entering the connected state, or caused the transport connection to enter the disconnected state.')
dlswTConnOperDiscActiveCir = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperDiscActiveCir.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperDiscActiveCir.setDescription('The number of circuits active (not in DISCONNECTED state) at the time the transport connection was last disconnected. This value is zero if the transport connection has never been connected.')
dlswTConnOperInDataPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 21), Counter32()).setUnits('SSP messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperInDataPkts.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperInDataPkts.setDescription('The number of Switch-to-Switch Protocol (SSP) messages of type DGRMFRAME, DATAFRAME, or INFOFRAME received on this transport connection.')
dlswTConnOperOutDataPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 22), Counter32()).setUnits('SSP messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperOutDataPkts.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperOutDataPkts.setDescription('The number of Switch-to-Switch Protocol (SSP) messages of type DGRMFRAME, DATAFRAME, or INFOFRAME transmitted on this transport connection.')
dlswTConnOperInDataOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 23), Counter32()).setUnits('octets').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperInDataOctets.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperInDataOctets.setDescription('The number of octets in Switch-to-Switch Protocol (SSP) messages of type DGRMFRAME, DATAFRAME, or INFOFRAME received on this transport connection. Each message is counted starting with the first octet following the SSP message header.')
dlswTConnOperOutDataOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 24), Counter32()).setUnits('octets').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperOutDataOctets.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperOutDataOctets.setDescription('The number of octets in Switch-to-Switch Protocol (SSP) messages of type DGRMFRAME, DATAFRAME, or INFOFRAME transmitted on this transport connection. Each message is counted starting with the first octet following the SSP message header.')
dlswTConnOperInCntlPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 25), Counter32()).setUnits('SSP messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperInCntlPkts.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperInCntlPkts.setDescription('The number of Switch-to-Switch Protocol (SSP) messages received on this transport connection which were not of type DGRMFRAME, DATAFRAME, or INFOFRAME.')
dlswTConnOperOutCntlPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 26), Counter32()).setUnits('SSP messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperOutCntlPkts.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperOutCntlPkts.setDescription('The number of Switch-to-Switch Protocol (SSP) messages transmitted on this transport connection which were not of type DGRMFRAME, DATAFRAME, or INFOFRAME.')
dlswTConnOperCURexSents = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperCURexSents.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperCURexSents.setDescription('The number of CanUReach_ex messages sent on this transport connection.')
dlswTConnOperICRexRcvds = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperICRexRcvds.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperICRexRcvds.setDescription('The number of ICanReach_ex messages received on this transport connection.')
dlswTConnOperCURexRcvds = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 29), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperCURexRcvds.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperCURexRcvds.setDescription('The number of CanUReach_ex messages received on this transport connection.')
dlswTConnOperICRexSents = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 30), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperICRexSents.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperICRexSents.setDescription('The number of ICanReach_ex messages sent on this transport connection.')
dlswTConnOperNQexSents = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 31), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperNQexSents.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperNQexSents.setDescription('The number of NetBIOS_NQ_ex (NetBIOS Name Query-explorer) messages sent on this transport connection.')
dlswTConnOperNRexRcvds = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 32), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperNRexRcvds.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperNRexRcvds.setDescription('The number of NETBIOS_NR_ex (NetBIOS Name Recognized-explorer) messages received on this transport connection.')
dlswTConnOperNQexRcvds = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 33), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperNQexRcvds.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperNQexRcvds.setDescription('The number of NETBIOS_NQ_ex messages received on this transport connection.')
dlswTConnOperNRexSents = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 34), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperNRexSents.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperNRexSents.setDescription('The number of NETBIOS_NR_ex messages sent on this transport connection.')
dlswTConnOperCirCreates = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 35), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperCirCreates.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperCirCreates.setDescription("The number of times that circuits entered `circuit_established' state (not counting transitions from `circuit_restart').")
dlswTConnOperCircuits = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 36), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperCircuits.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperCircuits.setDescription("The number of currently active circuits on this transport connection, where `active' means not in `disconnected' state.")
dlswTConnSpecific = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 2, 4))
dlswTConnTcp = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1))
dlswTConnTcpConfigTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 1), )
if mibBuilder.loadTexts: dlswTConnTcpConfigTable.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpConfigTable.setDescription('This table defines the TCP transport connections that will be either initiated by or accepted by this DLSw. It augments the entries in dlswTConnConfigTable whose domain is dlswTCPDomain.')
dlswTConnTcpConfigEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 1, 1), ).setIndexNames((0, "DLSW-MIB", "dlswTConnConfigIndex"))
if mibBuilder.loadTexts: dlswTConnTcpConfigEntry.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpConfigEntry.setDescription('Each conceptual row defines parameters that are specific to dlswTCPDomain transport connections.')
dlswTConnTcpConfigKeepAliveInt = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1800))).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnTcpConfigKeepAliveInt.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpConfigKeepAliveInt.setDescription('The time in seconds between TCP keepAlive messages when no traffic is flowing. Zero signifies no keepAlive protocol. Changes take effect only for new TCP connections.')
dlswTConnTcpConfigTcpConnections = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(2)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnTcpConfigTcpConnections.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpConfigTcpConnections.setDescription('This is our preferred number of TCP connections within a TCP transport connection. The actual number used is negotiated at capabilities exchange time. Changes take effect only for new transport connections.')
dlswTConnTcpConfigMaxSegmentSize = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(4096)).setUnits('packets').setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnTcpConfigMaxSegmentSize.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpConfigMaxSegmentSize.setDescription('This is the number of bytes that this node is willing to receive over the read TCP connection(s). Changes take effect for new transport connections.')
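# Illustrative sketch, not part of the generated MIB definitions: the three
# read-create columns above (keep-alive interval, preferred TCP connection
# count, max segment size) are configured per dlswTConnConfigIndex.  A minimal
# example of how a manager could set the keep-alive interval with the pysnmp
# high-level API; the target address, community string and row index are
# placeholder assumptions.
def _example_set_tcp_keepalive(target='192.0.2.1', community='private', conn_index=1, seconds=60):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, setCmd)
    # setCmd() is a generator; next() performs the single SET exchange and
    # returns (errorIndication, errorStatus, errorIndex, varBinds).
    return next(setCmd(
        SnmpEngine(),
        CommunityData(community),
        UdpTransportTarget((target, 161)),
        ContextData(),
        ObjectType(ObjectIdentity('DLSW-MIB', 'dlswTConnTcpConfigKeepAliveInt', conn_index), seconds)))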
dlswTConnTcpOperTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 2), )
if mibBuilder.loadTexts: dlswTConnTcpOperTable.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpOperTable.setDescription('A list of TCP transport connections. It is optional but desirable for the agent to keep an entry for some period of time after the transport connection is disconnected. This allows the manager to capture additional useful information about the connection, in particular, statistical information and the cause of the disconnection.')
dlswTConnTcpOperEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 2, 1), ).setIndexNames((0, "DLSW-MIB", "dlswTConnOperTDomain"), (0, "DLSW-MIB", "dlswTConnOperRemoteTAddr"))
if mibBuilder.loadTexts: dlswTConnTcpOperEntry.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpOperEntry.setDescription('Indexed by dlswTConnOperTDomain and dlswTConnOperRemoteTAddr.')
dlswTConnTcpOperKeepAliveInt = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1800))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnTcpOperKeepAliveInt.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpOperKeepAliveInt.setDescription('The time in seconds between TCP keepAlive messages when no traffic is flowing. Zero signifies no keepAlive protocol is operating.')
dlswTConnTcpOperPrefTcpConnections = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnTcpOperPrefTcpConnections.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpOperPrefTcpConnections.setDescription('This is the number of TCP connections preferred by this DLSw partner, as received in its capabilities exchange message.')
dlswTConnTcpOperTcpConnections = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnTcpOperTcpConnections.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpOperTcpConnections.setDescription('This is the actual current number of TCP connections within this transport connection.')
dlswIfTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 3, 1), )
if mibBuilder.loadTexts: dlswIfTable.setStatus('current')
if mibBuilder.loadTexts: dlswIfTable.setDescription('The list of interfaces on which DLSw is active.')
dlswIfEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 3, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: dlswIfEntry.setStatus('current')
if mibBuilder.loadTexts: dlswIfEntry.setDescription('Indexed by ifIndex.')
dlswIfRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 3, 1, 1, 1), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswIfRowStatus.setStatus('current')
if mibBuilder.loadTexts: dlswIfRowStatus.setDescription('This object is used by the manager to create or delete the row entry in the dlswIfTable following the RowStatus textual convention.')
dlswIfVirtualSegment = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 4095), ValueRangeConstraint(65535, 65535), )).clone(65535)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswIfVirtualSegment.setStatus('current')
if mibBuilder.loadTexts: dlswIfVirtualSegment.setDescription('The segment number that uniquely identifies the virtual segment to which this DLSw interface is connected. Current source routing protocols limit this value to the range 0 - 4095. (The value 0 is used by some management applications for special test cases.) A value of 65535 signifies that no virtual segment is assigned to this interface. For instance, in a non-source routing environment, segment number assignment is not required.')
dlswIfSapList = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 3, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(16, 16)).setFixedLength(16).clone(hexValue="AA000000000000000000000000000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswIfSapList.setStatus('current')
if mibBuilder.loadTexts: dlswIfSapList.setDescription('The SAP list indicates which SAPs are allowed to be data link switched through this interface. This list has the same format described for dlswTConnConfigSapList. When changes to this object take effect is implementation-specific. Turning off a particular SAP can destroy active circuits that are using that SAP. An agent implementation may reject such changes until there are no active circuits if it so chooses. In this case, it is up to the manager to close the circuits first, using dlswCircuitState. The DEFVAL below indicates support for SAPs 0, 4, 8, and C.')
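# Illustrative sketch, not part of the generated MIB definitions: dlswIfSapList
# is a 16-octet bit mask in the dlswTConnConfigSapList format, where each bit
# stands for one even-numbered SAP, the most significant bit of the first
# octet representing SAP 0x00, the next bit SAP 0x02, and so on.  The bit
# ordering assumed here is consistent with the DEFVAL above (0xAA in the first
# octet => SAPs 0x00, 0x04, 0x08, 0x0C).
def _example_decode_sap_list(sap_list_octets):
    """Return the list of enabled SAP numbers for a 16-octet SAP list value."""
    enabled = []
    for octet_pos, octet in enumerate(bytearray(sap_list_octets)):
        for bit_pos in range(8):
            if octet & (0x80 >> bit_pos):
                enabled.append((octet_pos * 8 + bit_pos) * 2)
    return enabled
# _example_decode_sap_list(b'\xAA' + b'\x00' * 15) -> [0, 4, 8, 12]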
dlswDirStat = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 4, 1))
dlswDirMacEntries = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirMacEntries.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacEntries.setDescription('The current total number of entries in the dlswDirMacTable.')
dlswDirMacCacheHits = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirMacCacheHits.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacCacheHits.setDescription('The number of times a cache search for a particular MAC address resulted in success.')
dlswDirMacCacheMisses = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirMacCacheMisses.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacCacheMisses.setDescription('The number of times a cache search for a particular MAC address resulted in failure.')
dlswDirMacCacheNextIndex = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirMacCacheNextIndex.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacCacheNextIndex.setDescription('The next value of dlswDirMacIndex to be assigned by the agent. A retrieval of this object atomically reserves the returned value for use by the manager to create a row in dlswDirMacTable. This makes it possible for the agent to control the index space of the MAC address cache, yet allows the manager to administratively create new rows.')
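# Illustrative sketch, not part of the generated MIB definitions:
# dlswDirMacCacheNextIndex supports the usual SNMP row-creation pattern of
# reading the reserved index and then creating the dlswDirMacTable row under
# that index with RowStatus.  A minimal example with pysnmp; the target
# address, community string and the example MAC value are placeholder
# assumptions, and a real manager would supply any further columns it needs
# in the same SET request.
def _example_create_dir_mac_row(target='192.0.2.1', community='private'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity,
                              OctetString, getCmd, setCmd)
    engine, auth, transport, ctx = (SnmpEngine(), CommunityData(community),
                                    UdpTransportTarget((target, 161)), ContextData())
    # Step 1: read the atomically reserved next free index (scalar instance .0).
    _, _, _, var_binds = next(getCmd(engine, auth, transport, ctx,
        ObjectType(ObjectIdentity('DLSW-MIB', 'dlswDirMacCacheNextIndex', 0))))
    index = int(var_binds[0][1])
    # Step 2: create the row with createAndGo, supplying the MAC column.
    return next(setCmd(engine, auth, transport, ctx,
        ObjectType(ObjectIdentity('DLSW-MIB', 'dlswDirMacMac', index),
                   OctetString(hexValue='0000f6123456')),
        ObjectType(ObjectIdentity('DLSW-MIB', 'dlswDirMacRowStatus', index),
                   'createAndGo')))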
dlswDirNBEntries = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirNBEntries.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBEntries.setDescription('The current total number of entries in the dlswDirNBTable.')
dlswDirNBCacheHits = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirNBCacheHits.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBCacheHits.setDescription('The number of times a cache search for a particular NetBIOS name resulted in success.')
dlswDirNBCacheMisses = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirNBCacheMisses.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBCacheMisses.setDescription('The number of times a cache search for a particular NetBIOS name resulted in failure.')
dlswDirNBCacheNextIndex = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirNBCacheNextIndex.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBCacheNextIndex.setDescription('The next value of dlswDirNBIndex to be assigned by the agent. A retrieval of this object atomically reserves the returned value for use by the manager to create a row in dlswDirNBTable. This makes it possible for the agent to control the index space for the NetBIOS name cache, yet allows the manager to administratively create new rows.')
dlswDirCache = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 4, 2))
dlswDirMacTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1), )
if mibBuilder.loadTexts: dlswDirMacTable.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacTable.setDescription('This table contains locations of MAC addresses. They could be either verified or not verified, local or remote, and configured locally or learned from either Capabilities Exchange messages or directory searches.')
dlswDirMacEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1), ).setIndexNames((0, "DLSW-MIB", "dlswDirMacIndex"))
if mibBuilder.loadTexts: dlswDirMacEntry.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacEntry.setDescription('Indexed by dlswDirMacIndex.')
dlswDirMacIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: dlswDirMacIndex.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacIndex.setDescription('Uniquely identifies a conceptual row of this table.')
dlswDirMacMac = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 2), MacAddressNC()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacMac.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacMac.setDescription('The MAC address, together with the dlswDirMacMask, specifies a set of MAC addresses that are defined or discovered through an interface or partner DLSw nodes.')
dlswDirMacMask = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 3), MacAddressNC().clone(hexValue="FFFFFFFFFFFF")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacMask.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacMask.setDescription('The MAC address mask, together with the dlswDirMacMac, specifies a set of MAC addresses that are defined or discovered through an interface or partner DLSw nodes.')
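# Illustrative sketch, not part of the generated MIB definitions:
# dlswDirMacMac together with dlswDirMacMask describes a set of MAC addresses.
# The helper below shows the mask semantics assumed here: a candidate address
# belongs to the entry when it agrees with the entry's MAC in every bit
# position the mask has set.
def _example_mac_matches_entry(candidate_mac, entry_mac, entry_mask):
    """All arguments are 6-octet byte strings; returns True on a match."""
    return all((c & m) == (e & m)
               for c, e, m in zip(bytearray(candidate_mac),
                                  bytearray(entry_mac),
                                  bytearray(entry_mask)))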
dlswDirMacEntryType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("userConfiguredPublic", 2), ("userConfiguredPrivate", 3), ("partnerCapExMsg", 4), ("dynamic", 5))).clone('userConfiguredPublic')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacEntryType.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacEntryType.setDescription('The cause of the creation of this conceptual row. It could be one of the three methods: (1) user configured, including via management protocol set operations, configuration file, command line or equivalent methods; (2) learned from the partner DLSw Capabilities Exchange messages; and (3) dynamic, e.g., learned from ICanReach messages, or LAN explorer frames. Since only individual MAC addresses can be dynamically learned, dynamic entries will all have a mask of all FFs. The public versus private distinction for user-configured resources applies only to local resources (UC remote resources are private), and indicates whether that resource should be advertised in capabilities exchange messages sent by this node.')
dlswDirMacLocationType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("local", 2), ("remote", 3))).clone('local')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacLocationType.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacLocationType.setDescription('The location of the resource (or a collection of resources using a mask) of this conceptual row is either (1) local - the resource is reachable via an interface, or (2) remote - the resource is reachable via a partner DLSw node (or a set of partner DLSw nodes).')
dlswDirMacLocation = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 6), RowPointer().clone((0, 0))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacLocation.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacLocation.setDescription('Points to either the ifEntry, dlswTConnConfigEntry, dlswTConnOperEntry, 0.0, or something that is implementation specific. It identifies the location of the MAC address (or the collection of MAC addresses.)')
dlswDirMacStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("reachable", 2), ("notReachable", 3))).clone('unknown')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacStatus.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacStatus.setDescription("This object specifies whether DLSw currently believes the MAC address to be accessible at the specified location. The value `notReachable' allows a configured resource definition to be taken out of service when a search to that resource fails (avoiding a repeat of the search).")
dlswDirMacLFSize = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 8), LFSize().clone('lfs65535')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacLFSize.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacLFSize.setDescription('The largest size of the MAC INFO field (LLC header and data) that a circuit to the MAC address can carry through this path.')
dlswDirMacRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacRowStatus.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacRowStatus.setDescription('This object is used by the manager to create or delete the row entry in the dlswDirMacTable following the RowStatus textual convention.')
dlswDirNBTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2), )
if mibBuilder.loadTexts: dlswDirNBTable.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBTable.setDescription('This table contains locations of NetBIOS names. They could be either verified or not verified, local or remote, and configured locally or learned from either Capabilities Exchange messages or directory searches.')
dlswDirNBEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1), ).setIndexNames((0, "DLSW-MIB", "dlswDirNBIndex"))
if mibBuilder.loadTexts: dlswDirNBEntry.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBEntry.setDescription('Indexed by dlswDirNBIndex.')
dlswDirNBIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: dlswDirNBIndex.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBIndex.setDescription('Uniquely identifies a conceptual row of this table.')
dlswDirNBName = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 2), NBName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBName.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBName.setDescription("The NetBIOS name (including `any char' and `wildcard' characters) specifies a set of NetBIOS names that are defined or discovered through an interface or partner DLSw nodes.")
dlswDirNBNameType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("individual", 2), ("group", 3))).clone('unknown')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBNameType.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBNameType.setDescription('Whether dlswDirNBName represents an (or a set of) individual or group NetBIOS name(s).')
dlswDirNBEntryType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("userConfiguredPublic", 2), ("userConfiguredPrivate", 3), ("partnerCapExMsg", 4), ("dynamic", 5))).clone('userConfiguredPublic')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBEntryType.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBEntryType.setDescription('The cause of the creation of this conceptual row. It could be one of the three methods: (1) user configured, including via management protocol set operations, configuration file, command line, or equivalent methods; (2) learned from the partner DLSw Capabilities Exchange messages; and (3) dynamic, e.g., learned from ICanReach messages, or test frames. Since only actual NetBIOS names can be dynamically learned, dynamic entries will not contain any char or wildcard characters. The public versus private distinction for user-configured resources applies only to local resources (UC remote resources are private), and indicates whether that resource should be advertised in capabilities exchange messages sent by this node.')
dlswDirNBLocationType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("local", 2), ("remote", 3))).clone('local')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBLocationType.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBLocationType.setDescription('The location of the resource (or a collection of resources using any char/wildcard characters) of this conceptual row is either (1) local - the resource is reachable via an interface, or (2) remote - the resource is reachable via a partner DLSw node (or a set of partner DLSw nodes).')
dlswDirNBLocation = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 6), RowPointer().clone((0, 0))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBLocation.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBLocation.setDescription('Points to either the ifEntry, dlswTConnConfigEntry, dlswTConnOperEntry, 0.0, or something that is implementation specific. It identifies the location of the NetBIOS name or the set of NetBIOS names.')
dlswDirNBStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("reachable", 2), ("notReachable", 3))).clone('unknown')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBStatus.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBStatus.setDescription("This object specifies whether DLSw currently believes the NetBIOS name to be accessible at the specified location. The value `notReachable' allows a configured resource definition to be taken out of service when a search to that resource fails (avoiding a repeat of the search).")
dlswDirNBLFSize = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 8), LFSize().clone('lfs65535')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBLFSize.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBLFSize.setDescription('The largest size of the MAC INFO field (LLC header and data) that a circuit to the NB name can carry through this path.')
dlswDirNBRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBRowStatus.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBRowStatus.setDescription('This object is used by the manager to create or delete the row entry in the dlswDirNBTable following the RowStatus textual convention.')
dlswDirLocate = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 4, 3))
dlswDirLocateMacTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 1), )
if mibBuilder.loadTexts: dlswDirLocateMacTable.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateMacTable.setDescription('This table is used to retrieve all entries in the dlswDirMacTable that match a given MAC address, in the order of the best matched first, the second best matched second, and so on, till no more entries match the given MAC address.')
dlswDirLocateMacEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 1, 1), ).setIndexNames((0, "DLSW-MIB", "dlswDirLocateMacMac"), (0, "DLSW-MIB", "dlswDirLocateMacMatch"))
if mibBuilder.loadTexts: dlswDirLocateMacEntry.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateMacEntry.setDescription('Indexed by dlswDirLocateMacMac and dlswDirLocateMacMatch. The first object is the MAC address of interest, and the second object is the order in the list of all entries that match the MAC address.')
dlswDirLocateMacMac = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 1, 1, 1), MacAddressNC())
if mibBuilder.loadTexts: dlswDirLocateMacMac.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateMacMac.setDescription('The MAC address to be located.')
dlswDirLocateMacMatch = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)))
if mibBuilder.loadTexts: dlswDirLocateMacMatch.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateMacMatch.setDescription('The order of the entries of dlswDirMacTable that match dlswDirLocateMacMac. A value of one represents the entry that best matches the MAC address. A value of two represents the second best matched entry, and so on.')
dlswDirLocateMacLocation = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 1, 1, 3), RowPointer()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirLocateMacLocation.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateMacLocation.setDescription('Points to the dlswDirMacEntry.')
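# Illustrative sketch, not part of the generated MIB definitions: the locate
# table above presents the dlswDirMacTable entries matching a given MAC in
# best-match-first order (dlswDirLocateMacMatch = 1 is the best match).  One
# plausible ranking an agent could use, assuming "best" means the most
# specific entry, i.e. the matching entry whose mask has the most bits set.
def _example_rank_mac_matches(candidate_mac, entries):
    """entries: iterable of (entry_mac, entry_mask) 6-octet byte strings."""
    matches = [(mac, mask) for mac, mask in entries
               if _example_mac_matches_entry(candidate_mac, mac, mask)]
    return sorted(matches,
                  key=lambda e: sum(bin(b).count('1') for b in bytearray(e[1])),
                  reverse=True)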
dlswDirLocateNBTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 2), )
if mibBuilder.loadTexts: dlswDirLocateNBTable.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateNBTable.setDescription('This table is used to retrieve all entries in the dlswDirNBTable that match a given NetBIOS name, in the order of the best matched first, the second best matched second, and so on, till no more entries match the given NetBIOS name.')
dlswDirLocateNBEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 2, 1), ).setIndexNames((0, "DLSW-MIB", "dlswDirLocateNBName"), (0, "DLSW-MIB", "dlswDirLocateNBMatch"))
if mibBuilder.loadTexts: dlswDirLocateNBEntry.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateNBEntry.setDescription('Indexed by dlswDirLocateNBName and dlswDirLocateNBMatch. The first object is the NetBIOS name of interest, and the second object is the order in the list of all entries that match the NetBIOS name.')
dlswDirLocateNBName = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 2, 1, 1), NBName())
if mibBuilder.loadTexts: dlswDirLocateNBName.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateNBName.setDescription('The NetBIOS name to be located (no any char or wildcards).')
dlswDirLocateNBMatch = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)))
if mibBuilder.loadTexts: dlswDirLocateNBMatch.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateNBMatch.setDescription('The order of the entries of dlswDirNBTable that match dlswDirLocateNBName. A value of one represents the entry that best matches the NetBIOS name. A value of two represents the second best matched entry, and so on.')
dlswDirLocateNBLocation = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 2, 1, 3), RowPointer()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirLocateNBLocation.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateNBLocation.setDescription('Points to the dlswDirNBEntry.')
dlswCircuitStat = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 5, 1))
dlswCircuitStatActives = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 5, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitStatActives.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitStatActives.setDescription('The current number of circuits in dlswCircuitTable that are not in the disconnected state.')
dlswCircuitStatCreates = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 5, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitStatCreates.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitStatCreates.setDescription("The total number of entries ever added to dlswCircuitTable, or reactivated upon exiting `disconnected' state.")
dlswCircuitTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 5, 2), )
if mibBuilder.loadTexts: dlswCircuitTable.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitTable.setDescription('This table is the circuit representation in the DLSw entity. Virtual data links are used to represent any internal end stations. There is a conceptual row associated with each data link. Thus, for circuits without an intervening transport connection, there are two conceptual rows for each circuit. The table consists of the circuits being established, established, and as an implementation option, circuits that have been disconnected. For circuits carried over transport connections, an entry is created after the CUR_cs was sent or received. For circuits between two locally attached devices, or internal virtual MAC addresses, an entry is created when the equivalent of CUR_cs sent/received status is reached. End station 1 (S1) and End station 2 (S2) are used to represent the two end stations of the circuit. S1 is always an end station which is locally attached. S2 may be locally attached or remote. If it is locally attached, the circuit will be represented by two rows indexed by (A, B) and (B, A) where A & B are the relevant MACs/SAPs. The table may be used to store the causes of disconnection of circuits. It is recommended that the oldest disconnected circuit entry be removed from this table when the memory space of disconnected circuits is needed.')
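# Illustrative sketch, not part of the generated MIB definitions: as described
# above, a circuit between two locally attached end stations is represented by
# two conceptual rows, indexed (S1 MAC, S1 SAP, S2 MAC, S2 SAP) and the
# reverse.  A tiny helper producing the companion index for a given circuit
# row index.
def _example_reverse_circuit_index(s1_mac, s1_sap, s2_mac, s2_sap):
    """Return the index tuple of the companion row for a local-local circuit."""
    return (s2_mac, s2_sap, s1_mac, s1_sap)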
dlswCircuitEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1), ).setIndexNames((0, "DLSW-MIB", "dlswCircuitS1Mac"), (0, "DLSW-MIB", "dlswCircuitS1Sap"), (0, "DLSW-MIB", "dlswCircuitS2Mac"), (0, "DLSW-MIB", "dlswCircuitS2Sap"))
if mibBuilder.loadTexts: dlswCircuitEntry.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitEntry.setDescription('Indexed by dlswCircuitS1Mac, dlswCircuitS1Sap, dlswCircuitS2Mac and dlswCircuitS2Sap.')
dlswCircuitS1Mac = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 1), MacAddressNC())
if mibBuilder.loadTexts: dlswCircuitS1Mac.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS1Mac.setDescription('The MAC Address of End Station 1 (S1) used for this circuit.')
dlswCircuitS1Sap = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1))
if mibBuilder.loadTexts: dlswCircuitS1Sap.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS1Sap.setDescription('The SAP at End Station 1 (S1) used for this circuit.')
dlswCircuitS1IfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS1IfIndex.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS1IfIndex.setDescription('The ifEntry index of the local interface through which S1 can be reached.')
dlswCircuitS1DlcType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 4), DlcType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS1DlcType.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS1DlcType.setDescription('The DLC protocol in use between the DLSw node and S1.')
dlswCircuitS1RouteInfo = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS1RouteInfo.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS1RouteInfo.setDescription('If source-route bridging is in use between the DLSw node and S1, this is the routing information field describing the path between the two devices. Otherwise the value will be an OCTET STRING of zero length.')
dlswCircuitS1CircuitId = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 6), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(8, 8), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS1CircuitId.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS1CircuitId.setDescription('The Circuit ID assigned by this DLSw node to this circuit. The first four octets are the DLC port Id, and the second four octets are the Data Link Correlator. If the DLSw SSP was not used to establish this circuit, the value will be a string of zero length.')
dlswCircuitS1Dlc = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 7), RowPointer()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS1Dlc.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS1Dlc.setDescription('Points to a conceptual row of the underlying DLC MIB, which could either be the standard MIBs (e.g., the SDLC), or an enterprise-specific DLC MIB.')
dlswCircuitS2Mac = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 8), MacAddressNC())
if mibBuilder.loadTexts: dlswCircuitS2Mac.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS2Mac.setDescription('The MAC Address of End Station 2 (S2) used for this circuit.')
dlswCircuitS2Sap = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1))
if mibBuilder.loadTexts: dlswCircuitS2Sap.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS2Sap.setDescription('The SAP at End Station 2 (S2) used for this circuit.')
dlswCircuitS2Location = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 10), EndStationLocation()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS2Location.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS2Location.setDescription('The location of End Station 2 (S2). If the location of End Station 2 is local, the interface information will be available in the conceptual row whose S1 and S2 are the S2 and the S1 of this conceptual row, respectively.')
dlswCircuitS2TDomain = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 11), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS2TDomain.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS2TDomain.setDescription('If the location of End Station 2 is remote, this value is the transport domain of the transport protocol the circuit is running over. Otherwise, the value is 0.0.')
dlswCircuitS2TAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 12), TAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS2TAddress.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS2TAddress.setDescription('If the location of End Station 2 is remote, this object contains the address of the partner DLSw, else it will be an OCTET STRING of zero length.')
dlswCircuitS2CircuitId = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 13), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(8, 8), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS2CircuitId.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS2CircuitId.setDescription('The Circuit ID assigned to this circuit by the partner DLSw node. The first four octets are the DLC port Id, and the second four octets are the Data Link Correlator. If the DLSw SSP was not used to establish this circuit, the value will be a string of zero length.')
dlswCircuitOrigin = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("s1", 1), ("s2", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitOrigin.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitOrigin.setDescription('This object specifies which of the two end stations initiated the establishment of this circuit.')
dlswCircuitEntryTime = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 15), TimeTicks()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitEntryTime.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitEntryTime.setDescription('The amount of time (in hundredths of a second) since this circuit table conceptual row was created.')
dlswCircuitStateTime = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 16), TimeTicks()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitStateTime.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitStateTime.setDescription('The amount of time (in hundredths of a second) since this circuit entered the current state.')
dlswCircuitState = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13))).clone(namedValues=NamedValues(("disconnected", 1), ("circuitStart", 2), ("resolvePending", 3), ("circuitPending", 4), ("circuitEstablished", 5), ("connectPending", 6), ("contactPending", 7), ("connected", 8), ("disconnectPending", 9), ("haltPending", 10), ("haltPendingNoack", 11), ("circuitRestart", 12), ("restartPending", 13)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswCircuitState.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitState.setDescription("The current state of this circuit. The agent, implementation specific, may choose to keep entries for some period of time after circuit disconnect, so the manager can gather the time and cause of disconnection. While all of the specified values may be returned from a GET operation, the only SETable value is `disconnectPending'. When this value is set, DLSw should perform the appropriate action given its previous state (e.g., send HALT_DL if the state was `connected') to bring the circuit down to the `disconnected' state. Both the partner DLSw and local end station(s) should be notified as appropriate. This MIB provides no facility to re-establish a disconnected circuit, because in DLSw this should be an end station-driven function.")
dlswCircuitPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unsupported", 1), ("low", 2), ("medium", 3), ("high", 4), ("highest", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitPriority.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitPriority.setDescription("The transmission priority of this circuit as understood by this DLSw node. This value is determined by the two DLSw nodes at circuit startup time. If this DLSw node does not support DLSw circuit priority, the value `unsupported' should be returned.")
dlswCircuitFCSendGrantedUnits = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCSendGrantedUnits.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCSendGrantedUnits.setDescription('The number of paced SSP messages that this DLSw is currently authorized to send on this circuit before it must stop and wait for an additional flow control indication from the partner DLSw. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
dlswCircuitFCSendCurrentWndw = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCSendCurrentWndw.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCSendCurrentWndw.setDescription("The current window size that this DLSw is using in its role as a data sender. This is the value by which this DLSw would increase the number of messages it is authorized to send, if it were to receive a flow control indication with the bits specifying `repeat window'. The value zero should be returned if this circuit is not running the DLSw pacing protocol.")
dlswCircuitFCRecvGrantedUnits = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCRecvGrantedUnits.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCRecvGrantedUnits.setDescription('The current number of paced SSP messages that this DLSw has authorized the partner DLSw to send on this circuit before the partner DLSw must stop and wait for an additional flow control indication from this DLSw. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
dlswCircuitFCRecvCurrentWndw = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 22), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCRecvCurrentWndw.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCRecvCurrentWndw.setDescription("The current window size that this DLSw is using in its role as a data receiver. This is the number of additional paced SSP messages that this DLSw would be authorizing its DLSw partner to send, if this DLSw were to send a flow control indication with the bits specifying `repeat window'. The value zero should be returned if this circuit is not running the DLSw pacing protocol.")
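# Illustrative sketch, not part of the generated MIB definitions: sender-side
# accounting implied by the flow-control columns above.  Each paced SSP
# message consumes one granted unit, a "repeat window" indication grants
# another current-window's worth of units, and a "halve window" indication
# shrinks the current window.  This is only a sketch of the RFC 1795 pacing
# behaviour those descriptions refer to, not the full protocol.
class _ExamplePacingSender(object):
    def __init__(self, initial_window):
        self.current_window = initial_window   # cf. dlswCircuitFCSendCurrentWndw
        self.granted_units = initial_window    # cf. dlswCircuitFCSendGrantedUnits
    def can_send(self):
        return self.granted_units > 0
    def on_message_sent(self):
        self.granted_units -= 1
    def on_repeat_window(self):
        # The partner re-grants one full window at the current size.
        self.granted_units += self.current_window
    def on_halve_window(self):
        self.current_window = max(1, self.current_window // 2)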
dlswCircuitFCLargestRecvGranted = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 23), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCLargestRecvGranted.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCLargestRecvGranted.setDescription('The largest receive window size granted by this DLSw during the current activation of this circuit. This is not the largest number of messages granted at any time, but the largest window size as represented by FCIND operator bits. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
dlswCircuitFCLargestSendGranted = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 24), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCLargestSendGranted.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCLargestSendGranted.setDescription('The largest send (with respect to this DLSw) window size granted by the partner DLSw during the current activation of this circuit. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
dlswCircuitFCHalveWndwSents = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCHalveWndwSents.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCHalveWndwSents.setDescription('The number of Halve Window operations this DLSw has sent on this circuit, in its role as a data receiver. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
dlswCircuitFCResetOpSents = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCResetOpSents.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCResetOpSents.setDescription('The number of Reset Window operations this DLSw has sent on this circuit, in its role as a data receiver. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
dlswCircuitFCHalveWndwRcvds = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCHalveWndwRcvds.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCHalveWndwRcvds.setDescription('The number of Halve Window operations this DLSw has received on this circuit, in its role as a data sender. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
dlswCircuitFCResetOpRcvds = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCResetOpRcvds.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCResetOpRcvds.setDescription('The number of Reset Window operations this DLSw has received on this circuit, in its role as a data sender. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
dlswCircuitDiscReasonLocal = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("endStationDiscRcvd", 1), ("endStationDlcError", 2), ("protocolError", 3), ("operatorCommand", 4), ("haltDlRcvd", 5), ("haltDlNoAckRcvd", 6), ("transportConnClosed", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitDiscReasonLocal.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitDiscReasonLocal.setDescription('The reason why this circuit was last disconnected, as seen by this DLSw node. This object is present only if the agent keeps circuit table entries around for some period after circuit disconnect.')
dlswCircuitDiscReasonRemote = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("endStationDiscRcvd", 2), ("endStationDlcError", 3), ("protocolError", 4), ("operatorCommand", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitDiscReasonRemote.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitDiscReasonRemote.setDescription("The generic reason code why this circuit was last disconnected, as reported by the DLSw partner in a HALT_DL or HALT_DL_NOACK. If the partner does not send a reason code in these messages, or the DLSw implementation does not report receiving one, the value `unknown' is returned. This object is present only if the agent keeps circuit table entries around for some period after circuit disconnect.")
dlswCircuitDiscReasonRemoteData = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 31), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(4, 4), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitDiscReasonRemoteData.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitDiscReasonRemoteData.setDescription('Implementation-specific data reported by the DLSw partner in a HALT_DL or HALT_DL_NOACK, to help specify how and why this circuit was last disconnected. If the partner does not send this data in these messages, or the DLSw implementation does not report receiving it, a string of zero length is returned. This object is present only if the agent keeps circuit table entries around for some period after circuit disconnect.')
dlswSdlcLsEntries = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 6, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswSdlcLsEntries.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsEntries.setDescription('The number of entries in dlswSdlcLsTable.')
dlswSdlcLsTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 6, 2), )
if mibBuilder.loadTexts: dlswSdlcLsTable.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsTable.setDescription('The table defines the virtual MAC addresses for those SDLC link stations that participate in data link switching.')
dlswSdlcLsEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SNA-SDLC-MIB", "sdlcLSAddress"))
if mibBuilder.loadTexts: dlswSdlcLsEntry.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsEntry.setDescription('The index of this table is the ifIndex value for the SDLC port which owns this link station and the poll address of the particular SDLC link station.')
dlswSdlcLsLocalMac = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1, 1), MacAddressNC()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswSdlcLsLocalMac.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsLocalMac.setDescription('The virtual MAC address used to represent the SDLC-attached link station to the rest of the DLSw network.')
dlswSdlcLsLocalSap = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswSdlcLsLocalSap.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsLocalSap.setDescription('The SAP used to represent this link station.')
dlswSdlcLsLocalIdBlock = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1, 3), DisplayString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(3, 3), )).clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswSdlcLsLocalIdBlock.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsLocalIdBlock.setDescription('The block number is the first three digits of the node_id, if available. These 3 hexadecimal digits identify the product.')
dlswSdlcLsLocalIdNum = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1, 4), DisplayString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(5, 5), )).clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswSdlcLsLocalIdNum.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsLocalIdNum.setDescription('The ID number is the last 5 digits of the node_id, if available. These 5 hexadecimal digits are administratively defined and combined with the 3 digit block number form the node_id. This node_id is used to identify the local node and is included in SNA XIDs.')
dlswSdlcLsRemoteMac = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1, 5), MacAddressNC().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswSdlcLsRemoteMac.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsRemoteMac.setDescription('The MAC address to which DLSw should attempt to connect this link station. If this information is not available, a length of zero for this object should be returned.')
dlswSdlcLsRemoteSap = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1, 6), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(1, 1), )).clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswSdlcLsRemoteSap.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsRemoteSap.setDescription('The SAP of the remote station to which this link station should be connected. If this information is not available, a length of zero for this object should be returned.')
dlswSdlcLsRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswSdlcLsRowStatus.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsRowStatus.setDescription('This object is used by the manager to create or delete the row entry in the dlswSdlcLsTable following the RowStatus textual convention.')
dlswTrapControl = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 1, 10))
dlswTrapCntlTConnPartnerReject = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 10, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("partial", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswTrapCntlTConnPartnerReject.setStatus('current')
if mibBuilder.loadTexts: dlswTrapCntlTConnPartnerReject.setDescription("Indicates whether the DLSw is permitted to emit partner reject related traps. With the value of `enabled' the DLSw will emit all partner reject related traps. With the value of `disabled' the DLSw will not emit any partner reject related traps. With the value of `partial' the DLSw will emit partner reject traps only for capabilities exchange (CapEx) rejects. The changes take effect immediately.")
dlswTrapCntlTConnProtViolation = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 10, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswTrapCntlTConnProtViolation.setStatus('current')
if mibBuilder.loadTexts: dlswTrapCntlTConnProtViolation.setDescription('Indicates whether the DLSw is permitted to generate protocol-violation traps on the events such as window size violation. The changes take effect immediately.')
dlswTrapCntlTConn = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 10, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("partial", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswTrapCntlTConn.setStatus('current')
if mibBuilder.loadTexts: dlswTrapCntlTConn.setDescription("Indicates whether the DLSw is permitted to emit transport connection up and down traps. With the value of `enabled' the DLSw will emit traps when connections enter the `connected' and `disconnected' states. With the value of `disabled' the DLSw will not emit traps when connections enter the `connected' and `disconnected' states. With the value of `partial' the DLSw will emit only transport connection down traps, and only when the connection is closed with busy. The changes take effect immediately.")
dlswTrapCntlCircuit = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 10, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("partial", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswTrapCntlCircuit.setStatus('current')
if mibBuilder.loadTexts: dlswTrapCntlCircuit.setDescription("Indicates whether the DLSw is permitted to generate circuit up and down traps. With the value of `enabled' the DLSw will emit traps when circuits enter the `connected' and `disconnected' states. With the value of `disabled' the DLSw will not emit traps when circuits enter the `connected' and `disconnected' states. With the value of `partial' the DLSw will emit traps only for those circuits that are initiated by this DLSw, e.g., by originating the CUR_CS message. The changes take effect immediately.")
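# --- Illustrative sketch (not part of the generated MIB module) -------------
# The dlswTrapCntl* scalars above gate trap emission.  A manager could toggle
# them with an SNMP SET; this sketch uses the pysnmp high-level API.  The agent
# host name, port and community string are assumptions made for the example.
def example_enable_circuit_traps():
    """Set dlswTrapCntlCircuit.0 to enabled(1) on a hypothetical DLSw agent."""
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity,
                              Integer32, setCmd)
    error_indication, error_status, error_index, var_binds = next(
        setCmd(SnmpEngine(),
               CommunityData('private'),                        # assumed community
               UdpTransportTarget(('dlsw-agent.example.org', 161)),
               ContextData(),
               ObjectType(ObjectIdentity('DLSW-MIB', 'dlswTrapCntlCircuit', 0),
                          Integer32(1))))                       # enabled(1)
    return error_indication or error_status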
dlswTraps = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 0))
dlswTrapTConnPartnerReject = NotificationType((1, 3, 6, 1, 2, 1, 46, 1, 0, 1)).setObjects(("DLSW-MIB", "dlswTConnOperTDomain"), ("DLSW-MIB", "dlswTConnOperRemoteTAddr"))
if mibBuilder.loadTexts: dlswTrapTConnPartnerReject.setStatus('current')
if mibBuilder.loadTexts: dlswTrapTConnPartnerReject.setDescription('This trap is sent each time a transport connection is rejected by a partner DLSw during Capabilities Exchanges. The emission of this trap is controlled by dlswTrapCntlTConnPartnerReject.')
dlswTrapTConnProtViolation = NotificationType((1, 3, 6, 1, 2, 1, 46, 1, 0, 2)).setObjects(("DLSW-MIB", "dlswTConnOperTDomain"), ("DLSW-MIB", "dlswTConnOperRemoteTAddr"))
if mibBuilder.loadTexts: dlswTrapTConnProtViolation.setStatus('current')
if mibBuilder.loadTexts: dlswTrapTConnProtViolation.setDescription('This trap is sent each time a protocol violation is detected for a transport connection. The emission of this trap is controlled by dlswTrapCntlTConnProtViolation.')
dlswTrapTConnUp = NotificationType((1, 3, 6, 1, 2, 1, 46, 1, 0, 3)).setObjects(("DLSW-MIB", "dlswTConnOperTDomain"), ("DLSW-MIB", "dlswTConnOperRemoteTAddr"))
if mibBuilder.loadTexts: dlswTrapTConnUp.setStatus('current')
if mibBuilder.loadTexts: dlswTrapTConnUp.setDescription("This trap is sent each time a transport connection enters `connected' state. The emission of this trap is controlled by dlswTrapCntlTConn.")
dlswTrapTConnDown = NotificationType((1, 3, 6, 1, 2, 1, 46, 1, 0, 4)).setObjects(("DLSW-MIB", "dlswTConnOperTDomain"), ("DLSW-MIB", "dlswTConnOperRemoteTAddr"))
if mibBuilder.loadTexts: dlswTrapTConnDown.setStatus('current')
if mibBuilder.loadTexts: dlswTrapTConnDown.setDescription("This trap is sent each time a transport connection enters `disconnected' state. The emission of this trap is controlled by dlswTrapCntlTConn.")
dlswTrapCircuitUp = NotificationType((1, 3, 6, 1, 2, 1, 46, 1, 0, 5)).setObjects(("DLSW-MIB", "dlswCircuitS1Mac"), ("DLSW-MIB", "dlswCircuitS1Sap"), ("DLSW-MIB", "dlswCircuitS2Mac"), ("DLSW-MIB", "dlswCircuitS2Sap"))
if mibBuilder.loadTexts: dlswTrapCircuitUp.setStatus('current')
if mibBuilder.loadTexts: dlswTrapCircuitUp.setDescription("This trap is sent each time a circuit enters `connected' state. The emission of this trap is controlled by dlswTrapCntlCircuit.")
dlswTrapCircuitDown = NotificationType((1, 3, 6, 1, 2, 1, 46, 1, 0, 6)).setObjects(("DLSW-MIB", "dlswCircuitS1Mac"), ("DLSW-MIB", "dlswCircuitS1Sap"), ("DLSW-MIB", "dlswCircuitS2Mac"), ("DLSW-MIB", "dlswCircuitS2Sap"))
if mibBuilder.loadTexts: dlswTrapCircuitDown.setStatus('current')
if mibBuilder.loadTexts: dlswTrapCircuitDown.setDescription("This trap is sent each time a circuit enters `disconnected' state. The emission of this trap is controlled by dlswTrapCntlCircuit.")
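# --- Illustrative sketch (not part of the generated MIB module) -------------
# The notifications above are normally emitted by the DLSw agent itself.  For
# testing, a trap carrying one of these NOTIFICATION-TYPEs could be sent with
# the pysnmp high-level API as sketched here; the manager address and community
# string are assumptions made for the example.
def example_send_circuit_up_trap():
    """Send a dlswTrapCircuitUp trap to a hypothetical manager."""
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, NotificationType, ObjectIdentity,
                              sendNotification)
    error_indication, error_status, error_index, var_binds = next(
        sendNotification(SnmpEngine(),
                         CommunityData('public'),               # assumed community
                         UdpTransportTarget(('nms.example.org', 162)),
                         ContextData(),
                         'trap',
                         NotificationType(
                             ObjectIdentity('DLSW-MIB', 'dlswTrapCircuitUp'))))
    return error_indication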
dlswConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 3))
dlswCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 3, 1))
dlswGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 3, 2))
dlswCoreCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 46, 3, 1, 1)).setObjects(("DLSW-MIB", "dlswNodeGroup"), ("DLSW-MIB", "dlswTConnStatGroup"), ("DLSW-MIB", "dlswTConnConfigGroup"), ("DLSW-MIB", "dlswTConnOperGroup"), ("DLSW-MIB", "dlswInterfaceGroup"), ("DLSW-MIB", "dlswCircuitGroup"), ("DLSW-MIB", "dlswCircuitStatGroup"), ("DLSW-MIB", "dlswNotificationGroup"), ("DLSW-MIB", "dlswNodeNBGroup"), ("DLSW-MIB", "dlswTConnNBGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswCoreCompliance = dlswCoreCompliance.setStatus('current')
if mibBuilder.loadTexts: dlswCoreCompliance.setDescription('The core compliance statement for all DLSw nodes.')
dlswTConnTcpCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 46, 3, 1, 2)).setObjects(("DLSW-MIB", "dlswTConnTcpConfigGroup"), ("DLSW-MIB", "dlswTConnTcpOperGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswTConnTcpCompliance = dlswTConnTcpCompliance.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpCompliance.setDescription('Compliance for DLSw nodes that use TCP as a transport connection protocol.')
dlswDirCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 46, 3, 1, 3)).setObjects(("DLSW-MIB", "dlswDirGroup"), ("DLSW-MIB", "dlswDirNBGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswDirCompliance = dlswDirCompliance.setStatus('current')
if mibBuilder.loadTexts: dlswDirCompliance.setDescription('Compliance for DLSw nodes that provide a directory function.')
dlswDirLocateCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 46, 3, 1, 4)).setObjects(("DLSW-MIB", "dlswDirLocateGroup"), ("DLSW-MIB", "dlswDirLocateNBGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswDirLocateCompliance = dlswDirLocateCompliance.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateCompliance.setDescription('Compliance for DLSw nodes that provide an ordered list of directory entries for a given resource.')
dlswSdlcCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 46, 3, 1, 5)).setObjects(("DLSW-MIB", "dlswSdlcGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswSdlcCompliance = dlswSdlcCompliance.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcCompliance.setDescription('Compliance for DLSw nodes that support SDLC.')
dlswNodeGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 1)).setObjects(("DLSW-MIB", "dlswNodeVersion"), ("DLSW-MIB", "dlswNodeVendorID"), ("DLSW-MIB", "dlswNodeVersionString"), ("DLSW-MIB", "dlswNodeStdPacingSupport"), ("DLSW-MIB", "dlswNodeStatus"), ("DLSW-MIB", "dlswNodeUpTime"), ("DLSW-MIB", "dlswNodeVirtualSegmentLFSize"), ("DLSW-MIB", "dlswNodeResourceMacExclusivity"), ("DLSW-MIB", "dlswTrapCntlTConnPartnerReject"), ("DLSW-MIB", "dlswTrapCntlTConnProtViolation"), ("DLSW-MIB", "dlswTrapCntlTConn"), ("DLSW-MIB", "dlswTrapCntlCircuit"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswNodeGroup = dlswNodeGroup.setStatus('current')
if mibBuilder.loadTexts: dlswNodeGroup.setDescription('Conformance group for DLSw node general information.')
dlswNodeNBGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 2)).setObjects(("DLSW-MIB", "dlswNodeResourceNBExclusivity"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswNodeNBGroup = dlswNodeNBGroup.setStatus('current')
if mibBuilder.loadTexts: dlswNodeNBGroup.setDescription('Conformance group for DLSw node general information specifically for nodes that support NetBIOS.')
dlswTConnStatGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 3)).setObjects(("DLSW-MIB", "dlswTConnStatActiveConnections"), ("DLSW-MIB", "dlswTConnStatCloseIdles"), ("DLSW-MIB", "dlswTConnStatCloseBusys"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswTConnStatGroup = dlswTConnStatGroup.setStatus('current')
if mibBuilder.loadTexts: dlswTConnStatGroup.setDescription('Conformance group for statistics for transport connections.')
dlswTConnConfigGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 4)).setObjects(("DLSW-MIB", "dlswTConnConfigTDomain"), ("DLSW-MIB", "dlswTConnConfigLocalTAddr"), ("DLSW-MIB", "dlswTConnConfigRemoteTAddr"), ("DLSW-MIB", "dlswTConnConfigLastModifyTime"), ("DLSW-MIB", "dlswTConnConfigEntryType"), ("DLSW-MIB", "dlswTConnConfigGroupDefinition"), ("DLSW-MIB", "dlswTConnConfigSetupType"), ("DLSW-MIB", "dlswTConnConfigSapList"), ("DLSW-MIB", "dlswTConnConfigAdvertiseMacNB"), ("DLSW-MIB", "dlswTConnConfigInitCirRecvWndw"), ("DLSW-MIB", "dlswTConnConfigOpens"), ("DLSW-MIB", "dlswTConnConfigRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswTConnConfigGroup = dlswTConnConfigGroup.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigGroup.setDescription('Conformance group for the configuration of transport connections.')
dlswTConnOperGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 5)).setObjects(("DLSW-MIB", "dlswTConnOperLocalTAddr"), ("DLSW-MIB", "dlswTConnOperEntryTime"), ("DLSW-MIB", "dlswTConnOperConnectTime"), ("DLSW-MIB", "dlswTConnOperState"), ("DLSW-MIB", "dlswTConnOperConfigIndex"), ("DLSW-MIB", "dlswTConnOperFlowCntlMode"), ("DLSW-MIB", "dlswTConnOperPartnerVersion"), ("DLSW-MIB", "dlswTConnOperPartnerVendorID"), ("DLSW-MIB", "dlswTConnOperPartnerVersionStr"), ("DLSW-MIB", "dlswTConnOperPartnerInitPacingWndw"), ("DLSW-MIB", "dlswTConnOperPartnerSapList"), ("DLSW-MIB", "dlswTConnOperPartnerMacExcl"), ("DLSW-MIB", "dlswTConnOperPartnerMacInfo"), ("DLSW-MIB", "dlswTConnOperDiscTime"), ("DLSW-MIB", "dlswTConnOperDiscReason"), ("DLSW-MIB", "dlswTConnOperDiscActiveCir"), ("DLSW-MIB", "dlswTConnOperInDataPkts"), ("DLSW-MIB", "dlswTConnOperOutDataPkts"), ("DLSW-MIB", "dlswTConnOperInDataOctets"), ("DLSW-MIB", "dlswTConnOperOutDataOctets"), ("DLSW-MIB", "dlswTConnOperInCntlPkts"), ("DLSW-MIB", "dlswTConnOperOutCntlPkts"), ("DLSW-MIB", "dlswTConnOperCURexSents"), ("DLSW-MIB", "dlswTConnOperICRexRcvds"), ("DLSW-MIB", "dlswTConnOperCURexRcvds"), ("DLSW-MIB", "dlswTConnOperICRexSents"), ("DLSW-MIB", "dlswTConnOperCirCreates"), ("DLSW-MIB", "dlswTConnOperCircuits"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswTConnOperGroup = dlswTConnOperGroup.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperGroup.setDescription('Conformance group for operation information for transport connections.')
dlswTConnNBGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 6)).setObjects(("DLSW-MIB", "dlswTConnOperPartnerNBExcl"), ("DLSW-MIB", "dlswTConnOperPartnerNBInfo"), ("DLSW-MIB", "dlswTConnOperNQexSents"), ("DLSW-MIB", "dlswTConnOperNRexRcvds"), ("DLSW-MIB", "dlswTConnOperNQexRcvds"), ("DLSW-MIB", "dlswTConnOperNRexSents"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswTConnNBGroup = dlswTConnNBGroup.setStatus('current')
if mibBuilder.loadTexts: dlswTConnNBGroup.setDescription('Conformance group for operation information for transport connections, specifically for nodes that support NetBIOS.')
dlswTConnTcpConfigGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 7)).setObjects(("DLSW-MIB", "dlswTConnTcpConfigKeepAliveInt"), ("DLSW-MIB", "dlswTConnTcpConfigTcpConnections"), ("DLSW-MIB", "dlswTConnTcpConfigMaxSegmentSize"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswTConnTcpConfigGroup = dlswTConnTcpConfigGroup.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpConfigGroup.setDescription('Conformance group for configuration information for transport connections using TCP.')
dlswTConnTcpOperGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 8)).setObjects(("DLSW-MIB", "dlswTConnTcpOperKeepAliveInt"), ("DLSW-MIB", "dlswTConnTcpOperPrefTcpConnections"), ("DLSW-MIB", "dlswTConnTcpOperTcpConnections"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswTConnTcpOperGroup = dlswTConnTcpOperGroup.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpOperGroup.setDescription('Conformance group for operation information for transport connections using TCP.')
dlswInterfaceGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 9)).setObjects(("DLSW-MIB", "dlswIfRowStatus"), ("DLSW-MIB", "dlswIfVirtualSegment"), ("DLSW-MIB", "dlswIfSapList"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswInterfaceGroup = dlswInterfaceGroup.setStatus('current')
if mibBuilder.loadTexts: dlswInterfaceGroup.setDescription('Conformance group for DLSw interfaces.')
dlswDirGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 10)).setObjects(("DLSW-MIB", "dlswDirMacEntries"), ("DLSW-MIB", "dlswDirMacCacheHits"), ("DLSW-MIB", "dlswDirMacCacheMisses"), ("DLSW-MIB", "dlswDirMacCacheNextIndex"), ("DLSW-MIB", "dlswDirMacMac"), ("DLSW-MIB", "dlswDirMacMask"), ("DLSW-MIB", "dlswDirMacEntryType"), ("DLSW-MIB", "dlswDirMacLocationType"), ("DLSW-MIB", "dlswDirMacLocation"), ("DLSW-MIB", "dlswDirMacStatus"), ("DLSW-MIB", "dlswDirMacLFSize"), ("DLSW-MIB", "dlswDirMacRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswDirGroup = dlswDirGroup.setStatus('current')
if mibBuilder.loadTexts: dlswDirGroup.setDescription('Conformance group for DLSw directory using MAC addresses.')
dlswDirNBGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 11)).setObjects(("DLSW-MIB", "dlswDirNBEntries"), ("DLSW-MIB", "dlswDirNBCacheHits"), ("DLSW-MIB", "dlswDirNBCacheMisses"), ("DLSW-MIB", "dlswDirNBCacheNextIndex"), ("DLSW-MIB", "dlswDirNBName"), ("DLSW-MIB", "dlswDirNBNameType"), ("DLSW-MIB", "dlswDirNBEntryType"), ("DLSW-MIB", "dlswDirNBLocationType"), ("DLSW-MIB", "dlswDirNBLocation"), ("DLSW-MIB", "dlswDirNBStatus"), ("DLSW-MIB", "dlswDirNBLFSize"), ("DLSW-MIB", "dlswDirNBRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswDirNBGroup = dlswDirNBGroup.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBGroup.setDescription('Conformance group for DLSw directory using NetBIOS names.')
dlswDirLocateGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 12)).setObjects(("DLSW-MIB", "dlswDirLocateMacLocation"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswDirLocateGroup = dlswDirLocateGroup.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateGroup.setDescription('Conformance group for a node that can return directory entry order for a given MAC address.')
dlswDirLocateNBGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 13)).setObjects(("DLSW-MIB", "dlswDirLocateNBLocation"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswDirLocateNBGroup = dlswDirLocateNBGroup.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateNBGroup.setDescription('Conformance group for a node that can return directory entry order for a given NetBIOS name.')
dlswCircuitStatGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 14)).setObjects(("DLSW-MIB", "dlswCircuitStatActives"), ("DLSW-MIB", "dlswCircuitStatCreates"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswCircuitStatGroup = dlswCircuitStatGroup.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitStatGroup.setDescription('Conformance group for statistics about circuits.')
dlswCircuitGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 15)).setObjects(("DLSW-MIB", "dlswCircuitS1IfIndex"), ("DLSW-MIB", "dlswCircuitS1DlcType"), ("DLSW-MIB", "dlswCircuitS1RouteInfo"), ("DLSW-MIB", "dlswCircuitS1CircuitId"), ("DLSW-MIB", "dlswCircuitS1Dlc"), ("DLSW-MIB", "dlswCircuitS2Location"), ("DLSW-MIB", "dlswCircuitS2TDomain"), ("DLSW-MIB", "dlswCircuitS2TAddress"), ("DLSW-MIB", "dlswCircuitS2CircuitId"), ("DLSW-MIB", "dlswCircuitOrigin"), ("DLSW-MIB", "dlswCircuitEntryTime"), ("DLSW-MIB", "dlswCircuitStateTime"), ("DLSW-MIB", "dlswCircuitState"), ("DLSW-MIB", "dlswCircuitPriority"), ("DLSW-MIB", "dlswCircuitFCSendGrantedUnits"), ("DLSW-MIB", "dlswCircuitFCSendCurrentWndw"), ("DLSW-MIB", "dlswCircuitFCRecvGrantedUnits"), ("DLSW-MIB", "dlswCircuitFCRecvCurrentWndw"), ("DLSW-MIB", "dlswCircuitFCLargestRecvGranted"), ("DLSW-MIB", "dlswCircuitFCLargestSendGranted"), ("DLSW-MIB", "dlswCircuitFCHalveWndwSents"), ("DLSW-MIB", "dlswCircuitFCResetOpSents"), ("DLSW-MIB", "dlswCircuitFCHalveWndwRcvds"), ("DLSW-MIB", "dlswCircuitFCResetOpRcvds"), ("DLSW-MIB", "dlswCircuitDiscReasonLocal"), ("DLSW-MIB", "dlswCircuitDiscReasonRemote"), ("DLSW-MIB", "dlswCircuitDiscReasonRemoteData"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswCircuitGroup = dlswCircuitGroup.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitGroup.setDescription('Conformance group for DLSw circuits.')
dlswSdlcGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 16)).setObjects(("DLSW-MIB", "dlswSdlcLsEntries"), ("DLSW-MIB", "dlswSdlcLsLocalMac"), ("DLSW-MIB", "dlswSdlcLsLocalSap"), ("DLSW-MIB", "dlswSdlcLsLocalIdBlock"), ("DLSW-MIB", "dlswSdlcLsLocalIdNum"), ("DLSW-MIB", "dlswSdlcLsRemoteMac"), ("DLSW-MIB", "dlswSdlcLsRemoteSap"), ("DLSW-MIB", "dlswSdlcLsRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswSdlcGroup = dlswSdlcGroup.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcGroup.setDescription('Conformance group for DLSw SDLC support.')
dlswNotificationGroup = NotificationGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 17)).setObjects(("DLSW-MIB", "dlswTrapTConnPartnerReject"), ("DLSW-MIB", "dlswTrapTConnProtViolation"), ("DLSW-MIB", "dlswTrapTConnUp"), ("DLSW-MIB", "dlswTrapTConnDown"), ("DLSW-MIB", "dlswTrapCircuitUp"), ("DLSW-MIB", "dlswTrapCircuitDown"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswNotificationGroup = dlswNotificationGroup.setStatus('current')
if mibBuilder.loadTexts: dlswNotificationGroup.setDescription('Conformance group for DLSw notifications.')
mibBuilder.exportSymbols("DLSW-MIB", dlswCircuitStatGroup=dlswCircuitStatGroup, dlswNodeVirtualSegmentLFSize=dlswNodeVirtualSegmentLFSize, dlswIfRowStatus=dlswIfRowStatus, dlswTrapTConnProtViolation=dlswTrapTConnProtViolation, dlswDirNBLFSize=dlswDirNBLFSize, dlswCircuitS2TDomain=dlswCircuitS2TDomain, dlswTConnStatCloseBusys=dlswTConnStatCloseBusys, dlswDirMacIndex=dlswDirMacIndex, dlswDirLocateNBGroup=dlswDirLocateNBGroup, dlswDirLocateNBLocation=dlswDirLocateNBLocation, dlswCircuitS2Location=dlswCircuitS2Location, dlswDirCache=dlswDirCache, dlswSdlcLsLocalSap=dlswSdlcLsLocalSap, dlswSdlcLsLocalIdBlock=dlswSdlcLsLocalIdBlock, dlswNotificationGroup=dlswNotificationGroup, dlswInterface=dlswInterface, dlswTrapTConnPartnerReject=dlswTrapTConnPartnerReject, dlswCircuitFCSendCurrentWndw=dlswCircuitFCSendCurrentWndw, dlswTrapCntlTConnProtViolation=dlswTrapCntlTConnProtViolation, EndStationLocation=EndStationLocation, dlswTConnOperDiscTime=dlswTConnOperDiscTime, dlswTConnOperPartnerInitPacingWndw=dlswTConnOperPartnerInitPacingWndw, dlswTConnOperEntryTime=dlswTConnOperEntryTime, dlswTConnOperPartnerMacInfo=dlswTConnOperPartnerMacInfo, dlswTConnOperCURexSents=dlswTConnOperCURexSents, dlswDirStat=dlswDirStat, dlswDirMacCacheHits=dlswDirMacCacheHits, dlswDirLocate=dlswDirLocate, dlswCircuitOrigin=dlswCircuitOrigin, dlswDirMacCacheMisses=dlswDirMacCacheMisses, dlswTConnTcpOperKeepAliveInt=dlswTConnTcpOperKeepAliveInt, dlswCircuitFCLargestRecvGranted=dlswCircuitFCLargestRecvGranted, dlswCircuitS2CircuitId=dlswCircuitS2CircuitId, PYSNMP_MODULE_ID=dlsw, dlswTConnConfigIndex=dlswTConnConfigIndex, dlswDirNBGroup=dlswDirNBGroup, dlswNodeGroup=dlswNodeGroup, dlswTConnConfigInitCirRecvWndw=dlswTConnConfigInitCirRecvWndw, dlswMIB=dlswMIB, dlswDirMacLFSize=dlswDirMacLFSize, dlswTConnOperPartnerMacExcl=dlswTConnOperPartnerMacExcl, dlswDirCompliance=dlswDirCompliance, dlswTConnTcpConfigEntry=dlswTConnTcpConfigEntry, dlswDirNBLocationType=dlswDirNBLocationType, dlswNode=dlswNode, dlswTConnConfigEntry=dlswTConnConfigEntry, dlswSdlcLsLocalIdNum=dlswSdlcLsLocalIdNum, dlsw=dlsw, dlswDirNBLocation=dlswDirNBLocation, dlswTConnStatCloseIdles=dlswTConnStatCloseIdles, dlswTConnOperEntry=dlswTConnOperEntry, dlswDirLocateNBEntry=dlswDirLocateNBEntry, dlswTraps=dlswTraps, dlswCircuitStatCreates=dlswCircuitStatCreates, dlswDirNBCacheHits=dlswDirNBCacheHits, dlswDirNBNameType=dlswDirNBNameType, dlswTConnOperCirCreates=dlswTConnOperCirCreates, dlswTConnConfigTDomain=dlswTConnConfigTDomain, dlswTConnOperInCntlPkts=dlswTConnOperInCntlPkts, dlswIfEntry=dlswIfEntry, dlswDirNBCacheNextIndex=dlswDirNBCacheNextIndex, null=null, dlswTConnStatActiveConnections=dlswTConnStatActiveConnections, DlcType=DlcType, dlswTConnOperInDataOctets=dlswTConnOperInDataOctets, dlswIfSapList=dlswIfSapList, dlswDirMacEntryType=dlswDirMacEntryType, dlswTConnOperTDomain=dlswTConnOperTDomain, dlswCircuitStatActives=dlswCircuitStatActives, TAddress=TAddress, dlswTConnOperNQexSents=dlswTConnOperNQexSents, dlswDirNBRowStatus=dlswDirNBRowStatus, dlswDirNBEntryType=dlswDirNBEntryType, dlswCircuitS1RouteInfo=dlswCircuitS1RouteInfo, dlswTConnConfigGroup=dlswTConnConfigGroup, dlswTConnConfigRowStatus=dlswTConnConfigRowStatus, dlswCircuitState=dlswCircuitState, dlswCircuitEntry=dlswCircuitEntry, dlswCircuitGroup=dlswCircuitGroup, dlswTConnOperOutDataPkts=dlswTConnOperOutDataPkts, dlswTConnTcpConfigTcpConnections=dlswTConnTcpConfigTcpConnections, dlswIfTable=dlswIfTable, dlswDirGroup=dlswDirGroup, dlswDirNBEntries=dlswDirNBEntries, 
dlswNodeStdPacingSupport=dlswNodeStdPacingSupport, dlswCircuitPriority=dlswCircuitPriority, dlswNodeStatus=dlswNodeStatus, dlswCircuitS2TAddress=dlswCircuitS2TAddress, dlswDirLocateCompliance=dlswDirLocateCompliance, dlswTConn=dlswTConn, dlswCircuitS1CircuitId=dlswCircuitS1CircuitId, dlswSdlcGroup=dlswSdlcGroup, NBName=NBName, dlswIfVirtualSegment=dlswIfVirtualSegment, dlswTConnOperPartnerNBExcl=dlswTConnOperPartnerNBExcl, dlswTConnOperNRexSents=dlswTConnOperNRexSents, dlswTConnTcpOperTable=dlswTConnTcpOperTable, dlswSdlcLsTable=dlswSdlcLsTable, dlswDirLocateMacTable=dlswDirLocateMacTable, dlswTConnOperNQexRcvds=dlswTConnOperNQexRcvds, dlswCircuitFCSendGrantedUnits=dlswCircuitFCSendGrantedUnits, dlswTConnOperTable=dlswTConnOperTable, dlswTConnConfigSapList=dlswTConnConfigSapList, dlswDirMacRowStatus=dlswDirMacRowStatus, DlswTCPAddress=DlswTCPAddress, dlswDirMacEntries=dlswDirMacEntries, dlswTConnConfigEntryType=dlswTConnConfigEntryType, dlswTConnOperInDataPkts=dlswTConnOperInDataPkts, dlswCircuitS2Mac=dlswCircuitS2Mac, dlswDirMacLocationType=dlswDirMacLocationType, dlswTConnOperFlowCntlMode=dlswTConnOperFlowCntlMode, dlswCircuitFCHalveWndwRcvds=dlswCircuitFCHalveWndwRcvds, dlswDirLocateMacEntry=dlswDirLocateMacEntry, dlswSdlc=dlswSdlc, dlswDirNBTable=dlswDirNBTable, dlswCircuitFCRecvGrantedUnits=dlswCircuitFCRecvGrantedUnits, dlswTConnStat=dlswTConnStat, dlswDirLocateNBTable=dlswDirLocateNBTable, dlswDirLocateNBMatch=dlswDirLocateNBMatch, dlswDirLocateGroup=dlswDirLocateGroup, dlswNodeVendorID=dlswNodeVendorID, dlswCircuitStateTime=dlswCircuitStateTime, dlswDirMacEntry=dlswDirMacEntry, dlswDirLocateMacMatch=dlswDirLocateMacMatch, dlswNodeUpTime=dlswNodeUpTime, dlswTConnTcpConfigGroup=dlswTConnTcpConfigGroup, dlswCircuitTable=dlswCircuitTable, dlswCircuitFCHalveWndwSents=dlswCircuitFCHalveWndwSents, dlswTConnConfigOpens=dlswTConnConfigOpens, dlswTConnTcpOperPrefTcpConnections=dlswTConnTcpOperPrefTcpConnections, dlswSdlcCompliance=dlswSdlcCompliance, dlswTConnConfigLocalTAddr=dlswTConnConfigLocalTAddr, dlswTConnOperConnectTime=dlswTConnOperConnectTime, dlswCircuitS2Sap=dlswCircuitS2Sap, dlswTConnNBGroup=dlswTConnNBGroup, dlswNodeResourceMacExclusivity=dlswNodeResourceMacExclusivity, dlswTrapTConnDown=dlswTrapTConnDown, dlswCircuitS1IfIndex=dlswCircuitS1IfIndex, dlswCircuitFCLargestSendGranted=dlswCircuitFCLargestSendGranted, dlswTrapCircuitUp=dlswTrapCircuitUp, dlswTrapCircuitDown=dlswTrapCircuitDown, dlswTrapCntlTConn=dlswTrapCntlTConn, dlswTConnOperRemoteTAddr=dlswTConnOperRemoteTAddr, dlswInterfaceGroup=dlswInterfaceGroup, dlswTConnOperState=dlswTConnOperState, dlswTrapCntlTConnPartnerReject=dlswTrapCntlTConnPartnerReject, dlswGroups=dlswGroups, dlswDirLocateMacLocation=dlswDirLocateMacLocation, dlswTConnTcpOperEntry=dlswTConnTcpOperEntry, dlswTConnConfigLastModifyTime=dlswTConnConfigLastModifyTime, dlswTConnOperConfigIndex=dlswTConnOperConfigIndex, dlswCircuitFCResetOpSents=dlswCircuitFCResetOpSents, dlswDirMacMac=dlswDirMacMac, dlswTConnTcpOperGroup=dlswTConnTcpOperGroup, dlswTConnOperDiscActiveCir=dlswTConnOperDiscActiveCir, dlswTConnConfigGroupDefinition=dlswTConnConfigGroupDefinition, dlswDirMacCacheNextIndex=dlswDirMacCacheNextIndex, dlswSdlcLsRemoteSap=dlswSdlcLsRemoteSap, dlswTConnTcpConfigMaxSegmentSize=dlswTConnTcpConfigMaxSegmentSize, dlswTConnStatGroup=dlswTConnStatGroup, dlswDirectory=dlswDirectory, dlswDirMacMask=dlswDirMacMask, dlswDirMacTable=dlswDirMacTable, dlswTConnTcpConfigKeepAliveInt=dlswTConnTcpConfigKeepAliveInt, dlswTConnOperICRexSents=dlswTConnOperICRexSents, 
dlswTrapControl=dlswTrapControl, dlswTConnConfigTable=dlswTConnConfigTable, MacAddressNC=MacAddressNC, dlswTConnOperICRexRcvds=dlswTConnOperICRexRcvds, dlswCircuitS1Sap=dlswCircuitS1Sap, dlswTConnOperOutCntlPkts=dlswTConnOperOutCntlPkts, dlswTConnOperOutDataOctets=dlswTConnOperOutDataOctets, dlswTConnOperNRexRcvds=dlswTConnOperNRexRcvds, dlswCircuitS1Mac=dlswCircuitS1Mac, dlswTConnConfigRemoteTAddr=dlswTConnConfigRemoteTAddr, dlswTConnOperPartnerVendorID=dlswTConnOperPartnerVendorID, dlswTConnOperCURexRcvds=dlswTConnOperCURexRcvds, dlswDirNBStatus=dlswDirNBStatus, dlswCircuitS1Dlc=dlswCircuitS1Dlc, dlswTrapCntlCircuit=dlswTrapCntlCircuit, dlswCircuitEntryTime=dlswCircuitEntryTime, dlswTConnConfigAdvertiseMacNB=dlswTConnConfigAdvertiseMacNB, dlswNodeResourceNBExclusivity=dlswNodeResourceNBExclusivity, dlswNodeNBGroup=dlswNodeNBGroup, dlswDirNBEntry=dlswDirNBEntry, dlswSdlcLsRowStatus=dlswSdlcLsRowStatus, LFSize=LFSize, dlswDomains=dlswDomains, dlswCircuitDiscReasonLocal=dlswCircuitDiscReasonLocal, dlswSdlcLsRemoteMac=dlswSdlcLsRemoteMac, dlswTConnConfigSetupType=dlswTConnConfigSetupType, dlswNodeVersionString=dlswNodeVersionString, dlswTConnOperPartnerVersion=dlswTConnOperPartnerVersion, dlswCircuitDiscReasonRemote=dlswCircuitDiscReasonRemote, dlswTConnOperGroup=dlswTConnOperGroup, dlswSdlcLsLocalMac=dlswSdlcLsLocalMac, dlswCircuitStat=dlswCircuitStat, dlswCircuitFCResetOpRcvds=dlswCircuitFCResetOpRcvds, dlswTConnTcpOperTcpConnections=dlswTConnTcpOperTcpConnections, dlswTConnTcp=dlswTConnTcp, dlswSdlcLsEntry=dlswSdlcLsEntry, dlswDirLocateNBName=dlswDirLocateNBName, dlswTConnOperPartnerSapList=dlswTConnOperPartnerSapList, dlswCircuitFCRecvCurrentWndw=dlswCircuitFCRecvCurrentWndw, dlswSdlcLsEntries=dlswSdlcLsEntries, dlswTConnOperDiscReason=dlswTConnOperDiscReason, dlswTConnOperPartnerNBInfo=dlswTConnOperPartnerNBInfo, dlswDirMacLocation=dlswDirMacLocation, dlswDirNBIndex=dlswDirNBIndex, dlswConformance=dlswConformance, dlswTConnTcpCompliance=dlswTConnTcpCompliance, dlswCircuit=dlswCircuit, dlswTConnTcpConfigTable=dlswTConnTcpConfigTable, dlswTConnOperCircuits=dlswTConnOperCircuits, dlswDirMacStatus=dlswDirMacStatus, dlswTConnOperLocalTAddr=dlswTConnOperLocalTAddr, dlswTConnOperPartnerVersionStr=dlswTConnOperPartnerVersionStr, dlswCircuitDiscReasonRemoteData=dlswCircuitDiscReasonRemoteData, dlswCircuitS1DlcType=dlswCircuitS1DlcType, dlswTConnSpecific=dlswTConnSpecific, dlswTCPDomain=dlswTCPDomain, dlswDirNBCacheMisses=dlswDirNBCacheMisses, dlswDirLocateMacMac=dlswDirLocateMacMac, dlswDirNBName=dlswDirNBName, dlswTrapTConnUp=dlswTrapTConnUp, dlswCoreCompliance=dlswCoreCompliance, dlswNodeVersion=dlswNodeVersion, dlswCompliances=dlswCompliances)
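# --- Illustrative sketch (not part of the generated MIB module) -------------
# Once this compiled module is on the pysnmp MIB search path, it can be loaded
# and its symbols resolved roughly as sketched below.  Placing the module on
# the search path (and the object chosen) are assumptions made for the example.
def example_load_dlsw_mib():
    """Load DLSW-MIB into a fresh MibBuilder and resolve one of its objects."""
    from pysnmp.smi import builder, view
    dlsw_builder = builder.MibBuilder()
    dlsw_builder.loadModules('DLSW-MIB')
    dlsw_view = view.MibViewController(dlsw_builder)
    node_status, = dlsw_builder.importSymbols('DLSW-MIB', 'dlswNodeStatus')
    # getName() yields the OID, e.g. (1, 3, 6, 1, 2, 1, 46, 1, 1, 5)
    return dlsw_view.getNodeLocation(node_status.getName())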
| 178.269774
| 9,756
| 0.788575
|
#
# PySNMP MIB module DLSW-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DLSW-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:07:01 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
sdlcLSAddress, = mibBuilder.importSymbols("SNA-SDLC-MIB", "sdlcLSAddress")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
Gauge32, MibIdentifier, Counter64, IpAddress, ModuleIdentity, mib_2, TimeTicks, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, Bits, Unsigned32, ObjectIdentity, NotificationType, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "MibIdentifier", "Counter64", "IpAddress", "ModuleIdentity", "mib-2", "TimeTicks", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "Bits", "Unsigned32", "ObjectIdentity", "NotificationType", "iso")
TruthValue, TextualConvention, RowPointer, RowStatus, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "RowPointer", "RowStatus", "DisplayString")
dlsw = ModuleIdentity((1, 3, 6, 1, 2, 1, 46))
if mibBuilder.loadTexts: dlsw.setLastUpdated('9606040900Z')
if mibBuilder.loadTexts: dlsw.setOrganization('AIW DLSw MIB RIGLET and IETF DLSw MIB Working Group')
if mibBuilder.loadTexts: dlsw.setContactInfo('David D. Chen IBM Corporation 800 Park, Highway 54 Research Triangle Park, NC 27709-9990 Tel: 1 919 254 6182 E-mail: dchen@vnet.ibm.com')
if mibBuilder.loadTexts: dlsw.setDescription('This MIB module contains objects to manage Data Link Switches.')
dlswMIB = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1))
dlswDomains = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 2))
class NBName(TextualConvention, OctetString):
description = "Represents a single qualified NetBIOS name, which can include `don't care' and `wildcard' characters to represent a number of real NetBIOS names. If an individual character position in the qualified name contains a `?', the corresponding character position in a real NetBIOS name is a `don't care'. If the qualified name ends in `*', the remainder of a real NetBIOS name is a `don't care'. `*' is only considered a wildcard if it appears at the end of a name."
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 16)
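# --- Illustrative sketch (not part of the generated MIB module) -------------
# A hypothetical helper showing the NBName wildcard semantics described above:
# '?' matches any single character and a trailing '*' makes the remainder of
# the real NetBIOS name a don't-care.
def example_nbname_matches(qualified, real):
    """Return True if the qualified NBName matches the real NetBIOS name."""
    if qualified.endswith('*'):
        qualified = qualified[:-1]     # '*' is a wildcard only at the end of a name
        real = real[:len(qualified)]
    if len(qualified) != len(real):
        return False
    return all(q in ('?', r) for q, r in zip(qualified, real))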
class MacAddressNC(TextualConvention, OctetString):
description = 'Represents an 802 MAC address represented in non-canonical format. That is, the most significant bit will be transmitted first. If this information is not available, the value is a zero length string.'
status = 'current'
displayHint = '1x:'
subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(6, 6), )
class TAddress(TextualConvention, OctetString):
description = 'Denotes a transport service address. For dlswTCPDomain, a TAddress is 4 octets long, containing the IP-address in network-byte order.'
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 255)
class EndStationLocation(TextualConvention, Integer32):
description = 'Representing the location of an end station related to the managed DLSw node.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("other", 1), ("internal", 2), ("remote", 3), ("local", 4))
class DlcType(TextualConvention, Integer32):
description = 'Representing the type of DLC of an end station, if applicable.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("other", 1), ("na", 2), ("llc", 3), ("sdlc", 4), ("qllc", 5))
class LFSize(TextualConvention, Integer32):
description = 'The largest size of the INFO field (including DLC header, not including any MAC-level or framing octets). 64 valid values as defined by the IEEE 802.1D Addendum are acceptable.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(516, 635, 754, 873, 993, 1112, 1231, 1350, 1470, 1542, 1615, 1688, 1761, 1833, 1906, 1979, 2052, 2345, 2638, 2932, 3225, 3518, 3812, 4105, 4399, 4865, 5331, 5798, 6264, 6730, 7197, 7663, 8130, 8539, 8949, 9358, 9768, 10178, 10587, 10997, 11407, 12199, 12992, 13785, 14578, 15370, 16163, 16956, 17749, 20730, 23711, 26693, 29674, 32655, 38618, 41600, 44591, 47583, 50575, 53567, 56559, 59551, 65535))
namedValues = NamedValues(("lfs516", 516), ("lfs635", 635), ("lfs754", 754), ("lfs873", 873), ("lfs993", 993), ("lfs1112", 1112), ("lfs1231", 1231), ("lfs1350", 1350), ("lfs1470", 1470), ("lfs1542", 1542), ("lfs1615", 1615), ("lfs1688", 1688), ("lfs1761", 1761), ("lfs1833", 1833), ("lfs1906", 1906), ("lfs1979", 1979), ("lfs2052", 2052), ("lfs2345", 2345), ("lfs2638", 2638), ("lfs2932", 2932), ("lfs3225", 3225), ("lfs3518", 3518), ("lfs3812", 3812), ("lfs4105", 4105), ("lfs4399", 4399), ("lfs4865", 4865), ("lfs5331", 5331), ("lfs5798", 5798), ("lfs6264", 6264), ("lfs6730", 6730), ("lfs7197", 7197), ("lfs7663", 7663), ("lfs8130", 8130), ("lfs8539", 8539), ("lfs8949", 8949), ("lfs9358", 9358), ("lfs9768", 9768), ("lfs10178", 10178), ("lfs10587", 10587), ("lfs10997", 10997), ("lfs11407", 11407), ("lfs12199", 12199), ("lfs12992", 12992), ("lfs13785", 13785), ("lfs14578", 14578), ("lfs15370", 15370), ("lfs16163", 16163), ("lfs16956", 16956), ("lfs17749", 17749), ("lfs20730", 20730), ("lfs23711", 23711), ("lfs26693", 26693), ("lfs29674", 29674), ("lfs32655", 32655), ("lfs38618", 38618), ("lfs41600", 41600), ("lfs44591", 44591), ("lfs47583", 47583), ("lfs50575", 50575), ("lfs53567", 53567), ("lfs56559", 56559), ("lfs59551", 59551), ("lfs65535", 65535))
null = MibIdentifier((0, 0))
dlswTCPDomain = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 2, 1))
class DlswTCPAddress(TextualConvention, OctetString):
description = 'Represents the IP address of a DLSw which uses TCP as a transport protocol.'
status = 'current'
displayHint = '1d.1d.1d.1d'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(4, 4)
fixedLength = 4
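# --- Illustrative sketch (not part of the generated MIB module) -------------
# For dlswTCPDomain a TAddress is simply an IPv4 address in network byte order,
# so the standard library can translate between dotted-decimal text and the
# 4-octet encoding.  The helper names here are assumptions made for the example.
def example_ip_to_taddress(ip_text):
    """'192.0.2.1' -> the 4-octet TAddress b'\\xc0\\x00\\x02\\x01'."""
    import socket
    return socket.inet_aton(ip_text)

def example_taddress_to_ip(octets):
    """The 4-octet TAddress b'\\xc0\\x00\\x02\\x01' -> '192.0.2.1'."""
    import socket
    return socket.inet_ntoa(octets)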
dlswNode = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 1))
dlswTConn = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 2))
dlswInterface = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 3))
dlswDirectory = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 4))
dlswCircuit = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 5))
dlswSdlc = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 6))
dlswNodeVersion = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswNodeVersion.setReference('DLSW: Switch-to-Switch Protocol RFC 1795')
if mibBuilder.loadTexts: dlswNodeVersion.setStatus('current')
if mibBuilder.loadTexts: dlswNodeVersion.setDescription('This value identifies the particular version of the DLSw standard supported by this DLSw. The first octet is a hexadecimal value representing the DLSw standard Version number of this DLSw, and the second is a hexadecimal value representing the DLSw standard Release number. This information is reported in DLSw Capabilities Exchange.')
dlswNodeVendorID = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(3, 3)).setFixedLength(3)).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswNodeVendorID.setReference('DLSW: Switch-to-Switch Protocol RFC 1795')
if mibBuilder.loadTexts: dlswNodeVendorID.setStatus('current')
if mibBuilder.loadTexts: dlswNodeVendorID.setDescription("The value identifies the manufacturer's IEEE-assigned organizationally Unique Identifier (OUI) of this DLSw. This information is reported in DLSw Capabilities Exchange.")
dlswNodeVersionString = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswNodeVersionString.setReference('DLSW: Switch-to-Switch Protocol RFC 1795')
if mibBuilder.loadTexts: dlswNodeVersionString.setStatus('current')
if mibBuilder.loadTexts: dlswNodeVersionString.setDescription('This string gives product-specific information about this DLSw (e.g., product name, code release and fix level). This flows in Capabilities Exchange messages.')
dlswNodeStdPacingSupport = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("adaptiveRcvWindow", 2), ("fixedRcvWindow", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswNodeStdPacingSupport.setStatus('current')
if mibBuilder.loadTexts: dlswNodeStdPacingSupport.setDescription('Circuit pacing, as defined in the DLSw Standard, allows each of the two DLSw nodes on a circuit to control the amount of data the other is permitted to send to them. This object reflects the level of support the DLSw node has for this protocol. (1) means the node has no support for the standard circuit pacing flows; it may use RFC 1434+ methods only, or a proprietary flow control scheme. (2) means the node supports the standard scheme and can vary the window sizes it grants as a data receiver. (3) means the node supports the standard scheme but never varies its receive window size.')
dlswNodeStatus = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("inactive", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswNodeStatus.setStatus('current')
if mibBuilder.loadTexts: dlswNodeStatus.setDescription('The status of the DLSw part of the system. Changing the value from active to inactive causes DLSw to take the following actions - (1) it disconnects all circuits through all DLSw partners, (2) it disconnects all transport connections to all DLSw partners, (3) it disconnects all local DLC connections, and (4) it stops processing all DLC connection set-up traffic. Since these are destructive actions, the user should query the circuit and transport connection tables in advance to understand the effect this action will have. Changing the value from inactive to active causes DLSw to come up in its initial state, i.e., transport connections established and ready to bring up circuits.')
dlswNodeUpTime = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 6), TimeTicks()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswNodeUpTime.setStatus('current')
if mibBuilder.loadTexts: dlswNodeUpTime.setDescription('The amount of time (in hundredths of a second) since the DLSw portion of the system was last re-initialized. That is, if dlswNodeStatus is in the active state, the time since dlswNodeStatus entered the active state. It will remain zero while dlswNodeStatus is in the inactive state.')
dlswNodeVirtualSegmentLFSize = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 7), LFSize().clone('lfs65535')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswNodeVirtualSegmentLFSize.setStatus('current')
if mibBuilder.loadTexts: dlswNodeVirtualSegmentLFSize.setDescription('The largest frame size (including DLC header and info field but not any MAC-level or framing octets) this DLSw can forward on any path through itself. This object can represent any box- level frame size forwarding restriction (e.g., from the use of fixed-size buffers). Some DLSw implementations will have no such restriction. This value will affect the LF size of circuits during circuit creation. The LF size of an existing circuit can be found in the RIF (Routing Information Field).')
dlswNodeResourceNBExclusivity = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 8), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswNodeResourceNBExclusivity.setStatus('current')
if mibBuilder.loadTexts: dlswNodeResourceNBExclusivity.setDescription('The value of true indicates that the NetBIOS Names configured in dlswDirNBTable are the only ones accessible via this DLSw. If a node supports sending run-time capabilities exchange messages, changes to this object should cause that action. It is up to the implementation exactly when to start the run-time capabilities exchange.')
dlswNodeResourceMacExclusivity = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 9), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswNodeResourceMacExclusivity.setStatus('current')
if mibBuilder.loadTexts: dlswNodeResourceMacExclusivity.setDescription('The value of true indicates that the MAC addresses configured in the dlswDirMacTable are the only ones accessible via this DLSw. If a node supports sending run-time capabilities exchange messages, changes to this object should cause that action. It is up to the implementation exactly when to start the run-time capabilities exchange.')
dlswTConnStat = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 2, 1))
dlswTConnStatActiveConnections = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 2, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnStatActiveConnections.setStatus('current')
if mibBuilder.loadTexts: dlswTConnStatActiveConnections.setDescription("The number of transport connections that are not in `disconnected' state.")
dlswTConnStatCloseIdles = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnStatCloseIdles.setStatus('current')
if mibBuilder.loadTexts: dlswTConnStatCloseIdles.setDescription('The number of times transport connections in this node exited the connected state with zero active circuits on the transport connection.')
dlswTConnStatCloseBusys = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnStatCloseBusys.setStatus('current')
if mibBuilder.loadTexts: dlswTConnStatCloseBusys.setDescription('The number of times transport connections in this node exited the connected state with some non-zero number of active circuits on the transport connection. Normally this means the transport connection failed unexpectedly.')
dlswTConnConfigTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 2, 2), )
if mibBuilder.loadTexts: dlswTConnConfigTable.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigTable.setDescription("This table defines the transport connections that will be initiated or accepted by this DLSw. The structure of masks allows a single conceptual row to define, via wildcards, a collection of transport connections. For a specific transport connection, multiple conceptual rows may match the transport address; the `best' match is the one that determines the characteristics of the transport connection.")
dlswTConnConfigEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1), ).setIndexNames((0, "DLSW-MIB", "dlswTConnConfigIndex"))
if mibBuilder.loadTexts: dlswTConnConfigEntry.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigEntry.setDescription('Each conceptual row defines a collection of transport connections.')
dlswTConnConfigIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: dlswTConnConfigIndex.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigIndex.setDescription('The index to the conceptual row of the table. Negative numbers are not allowed. There are objects defined that point to conceptual rows of this table with this index value. Zero is used to denote that no corresponding row exists. Index values are assigned by the agent, and should not be reused but should continue to increase in value.')
dlswTConnConfigTDomain = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 2), ObjectIdentifier()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigTDomain.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigTDomain.setDescription('The object identifier which indicates the transport domain of this conceptual row.')
dlswTConnConfigLocalTAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 3), TAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigLocalTAddr.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigLocalTAddr.setDescription('The local transport address for this conceptual row of the transport connection definition.')
dlswTConnConfigRemoteTAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 4), TAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigRemoteTAddr.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigRemoteTAddr.setDescription('The remote transport address. Together with dlswTConnConfigEntryType and dlswTConnConfigGroupDefinition, the object instance of this conceptual row identifies a collection of the transport connections that will be either initiated by this DLSw or initiated by a partner DLSw and accepted by this DLSw.')
dlswTConnConfigLastModifyTime = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 5), TimeTicks()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnConfigLastModifyTime.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigLastModifyTime.setDescription('The time (in hundredths of a second) since the value of any object in this conceptual row except for dlswTConnConfigOpens was last changed. This value may be compared to dlswTConnOperConnectTime to determine whether values in this row are completely valid for a transport connection created using this row definition.')
dlswTConnConfigEntryType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("individual", 1), ("global", 2), ("group", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigEntryType.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigEntryType.setDescription("The object instance signifies the type of entry in the associated conceptual row. The value of `individual' means that the entry applies to a specific partner DLSw node as identified by dlswTConnConfigRemoteTAddr and dlswTConnConfigTDomain. The value of `global' means that the entry applies to all partner DLSw nodes of the TDomain. The value of 'group' means that the entry applies to a specific set of DLSw nodes in the TDomain. Any group definitions are enterprise-specific and are pointed to by dlswTConnConfigGroupDefinition. In the cases of `global' and `group', the value in dlswTConnConfigRemoteTAddr may not have any significance.")
dlswTConnConfigGroupDefinition = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 7), RowPointer()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigGroupDefinition.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigGroupDefinition.setDescription("For conceptual rows of `individual' and `global' as specified in dlswTConnConfigEntryType, the instance of this object is `0.0'. For conceptual rows of `group', the instance points to the specific group definition.")
dlswTConnConfigSetupType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("activePersistent", 2), ("activeOnDemand", 3), ("passive", 4), ("excluded", 5))).clone('passive')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigSetupType.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigSetupType.setDescription('This value of the instance of a conceptual row identifies the behavior of the collection of transport connections that this conceptual row defines. A value of activePersistent, activeOnDemand or passive means this DLSw will accept any transport connections, initiated by partner DLSw nodes, which are defined by this conceptual row. The value of activePersistent means this DLSw will also initiate the transport connections of this conceptual row and retry periodically if necessary. The value of activeOnDemand means this DLSw will initiate a transport connection of this conceptual row if there is a directory cache hit. The value of other is implementation-specific. The value of excluded means that the specified node is not allowed to be a partner to this DLSw node. To take a certain conceptual row definition out of service, a value of notInService for dlswTConnConfigRowStatus should be used.')
dlswTConnConfigSapList = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(16, 16)).setFixedLength(16).clone(hexValue="AA000000000000000000000000000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigSapList.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigSapList.setDescription('The SAP list indicates which SAPs are advertised to the transport connection defined by this conceptual row. Only SAPs with even numbers are represented, in the form of the most significant bit of the first octet representing the SAP 0, the next most significant bit representing the SAP 2, to the least significant bit of the last octet representing the SAP 254. Data link switching is allowed for those SAPs which have one in its corresponding bit, not allowed otherwise. The whole SAP list has to be changed together. Changing the SAP list affects only new circuit establishments and has no effect on established circuits. This list can be used to restrict specific partners from knowing about all the SAPs used by DLSw on all its interfaces (these are represented in dlswIfSapList for each interface). For instance, one may want to run NetBIOS with some partners but not others. If a node supports sending run-time capabilities exchange messages, changes to this object should cause that action. When to start the run-time capabilities exchange is implementation-specific. The DEFVAL below indicates support for SAPs 0, 4, 8, and C.')
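# --- Illustrative sketch (not part of the generated MIB module) -------------
# A hypothetical helper showing the SAP-list bit layout described above: bit 7
# of octet 0 represents SAP 0x00, bit 6 represents SAP 0x02, and so on down to
# bit 0 of octet 15 for SAP 0xFE.  Only even SAP values are representable.
def example_encode_sap_list(saps):
    """Return the 16-octet SAP list advertising the given even SAP numbers."""
    octets = bytearray(16)
    for sap in saps:
        if sap % 2 or not 0 <= sap <= 0xFE:
            raise ValueError('only even SAPs 0x00..0xFE are representable')
        octets[sap // 16] |= 0x80 >> ((sap % 16) // 2)
    return bytes(octets)

# example_encode_sap_list([0x00, 0x04, 0x08, 0x0C]) yields 0xAA followed by
# fifteen zero octets, i.e. the DEFVAL shown above for SAPs 0, 4, 8 and C.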
dlswTConnConfigAdvertiseMacNB = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 10), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigAdvertiseMacNB.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigAdvertiseMacNB.setDescription('The value of true indicates that any defined local MAC addresses and NetBIOS names will be advertised to a partner node via initial and (if supported) run-time capabilities exchange messages. The DLSw node should send the appropriate exclusivity control vector to accompany each list it sends, or to represent that the node is explicitly configured to have a null list. The value of false indicates that the DLSw node should not send a MAC address list or NetBIOS name list, and should also not send their corresponding exclusivity control vectors.')
dlswTConnConfigInitCirRecvWndw = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(1)).setUnits('SSP messages').setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigInitCirRecvWndw.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigInitCirRecvWndw.setDescription('The initial circuit receive pacing window size, in the unit of SSP messages, to be used for future transport connections activated using this table row. The managed node sends this value as its initial receive pacing window in its initial capabilities exchange message. Changing this value does not affect the initial circuit receive pacing window size of currently active transport connections. If the standard window pacing scheme is not supported, the value is zero. A larger receive window value may be appropriate for partners that are reachable only via physical paths that have longer network delays.')
dlswTConnConfigOpens = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnConfigOpens.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigOpens.setDescription('Number of times transport connections entered connected state according to the definition of this conceptual row.')
dlswTConnConfigRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 2, 1, 13), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnConfigRowStatus.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigRowStatus.setDescription('This object is used by the manager to create or delete the row entry in the dlswTConnConfigTable following the RowStatus textual convention. The value of notInService will be used to take a conceptual row definition out of use.')
dlswTConnOperTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 2, 3), )
if mibBuilder.loadTexts: dlswTConnOperTable.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperTable.setDescription('A list of transport connections. It is optional but desirable for the agent to keep an entry for some period of time after the transport connection is disconnected. This allows the manager to capture additional useful information about the connection, in particular, statistical information and the cause of the disconnection.')
dlswTConnOperEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1), ).setIndexNames((0, "DLSW-MIB", "dlswTConnOperTDomain"), (0, "DLSW-MIB", "dlswTConnOperRemoteTAddr"))
if mibBuilder.loadTexts: dlswTConnOperEntry.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperEntry.setDescription('')
dlswTConnOperTDomain = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 1), ObjectIdentifier())
if mibBuilder.loadTexts: dlswTConnOperTDomain.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperTDomain.setDescription('The object identifier indicates the transport domain of this transport connection.')
dlswTConnOperLocalTAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 2), TAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperLocalTAddr.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperLocalTAddr.setDescription('The local transport address for this transport connection. This value could be different from dlswTConnConfigLocalAddr, if the value of the latter were changed after this transport connection was established.')
dlswTConnOperRemoteTAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 3), TAddress())
if mibBuilder.loadTexts: dlswTConnOperRemoteTAddr.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperRemoteTAddr.setDescription('The remote transport address of this transport connection.')
dlswTConnOperEntryTime = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 4), TimeTicks()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperEntryTime.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperEntryTime.setDescription('The amount of time (in hundredths of a second) since this transport connection conceptual row was created.')
dlswTConnOperConnectTime = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 5), TimeTicks()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperConnectTime.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperConnectTime.setDescription("The amount of time (in hundredths of a second) since this transport connection last entered the 'connected' state. A value of zero means this transport connection has never been established.")
dlswTConnOperState = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("connecting", 1), ("initCapExchange", 2), ("connected", 3), ("quiescing", 4), ("disconnecting", 5), ("disconnected", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswTConnOperState.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperState.setDescription("The state of this transport connection. The transport connection enters `connecting' state when DLSw makes a connection request to the transport layer. Once initial Capabilities Exchange is sent, the transport connection enters `initCapExchange' state. When partner capabilities have been determined and the transport connection is ready for sending CanUReach (CUR) messages, it moves to the `connected' state. When DLSw is in the process of bringing down the connection, it is in the `disconnecting' state. When the transport layer indicates one of its connections is disconnected, the transport connection moves to the `disconnected' state. Whereas all of the values will be returned in response to a management protocol retrieval operation, only two values may be specified in a management protocol set operation: `quiescing' and `disconnecting'. Changing the value to `quiescing' prevents new circuits from being established, and will cause a transport disconnect when the last circuit on the connection goes away. Changing the value to `disconnecting' will force off all circuits immediately and bring the connection to `disconnected' state.")
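# --- Illustrative helper (not part of the pysmi-generated MIB definitions) ---
# A small sketch of the dlswTConnOperState values described above. Only
# quiescing(4) and disconnecting(5) may be written by a manager; the other
# values are read-only observations of the connection state machine.
_dlswTConnOperStateNames = {
    1: 'connecting',
    2: 'initCapExchange',
    3: 'connected',
    4: 'quiescing',
    5: 'disconnecting',
    6: 'disconnected',
}
def _dlswTConnOperStateIsSettable(stateValue):
    """Return True for the two values a SET operation may supply."""
    return stateValue in (4, 5)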
dlswTConnOperConfigIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperConfigIndex.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperConfigIndex.setDescription('The value of dlswTConnConfigIndex of the dlswTConnConfigEntry that governs the configuration information used by this dlswTConnOperEntry. The manager can therefore normally examine both configured and operational information for this transport connection. This value is zero if the corresponding dlswTConnConfigEntry was deleted after the creation of this dlswTConnOperEntry. If some fields in the former were changed but the conceptual row was not deleted, some configuration information may not be valid for this operational transport connection. The manager can compare dlswTConnOperConnectTime and dlswTConnConfigLastModifyTime to determine if this condition exists.')
dlswTConnOperFlowCntlMode = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("undetermined", 1), ("pacing", 2), ("other", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperFlowCntlMode.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperFlowCntlMode.setDescription('The flow control mechanism in use on this transport connection. This value is undetermined (1) before the mode of flow control can be established on a new transport connection (i.e., after CapEx is sent but before Capex or other SSP control messages have been received). Pacing (2) indicates that the standard RFC 1795 pacing mechanism is in use. Other (3) may be either the RFC 1434+ xBusy mechanism operating to a back-level DLSw, or a vendor-specific flow control method. Whether it is xBusy or not can be inferred from dlswTConnOperPartnerVersion.')
dlswTConnOperPartnerVersion = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 9), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(2, 2), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerVersion.setReference('DLSW: Switch-to-Switch Protocol RFC 1795')
if mibBuilder.loadTexts: dlswTConnOperPartnerVersion.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerVersion.setDescription("This value identifies which version (first octet) and release (second octet) of the DLSw standard is supported by this partner DLSw. This information is obtained from a DLSw capabilities exchange message received from the partner DLSw. A string of zero length is returned before a Capabilities Exchange message is received, or if one is never received. A conceptual row with a dlswTConnOperState of `connected' but a zero length partner version indicates that the partner is a non-standard DLSw partner. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
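# --- Illustrative helper (not part of the pysmi-generated MIB definitions) ---
# A minimal sketch of reading dlswTConnOperPartnerVersion: a two-octet value
# carries the standard version in the first octet and the release in the
# second, while a zero-length value means no capabilities exchange message
# has been received from the partner.
def _dlswParsePartnerVersion(versionOctets):
    """Return (version, release) or None if the partner version is unknown."""
    octets = bytearray(versionOctets)
    if len(octets) != 2:
        return None
    return octets[0], octets[1]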
dlswTConnOperPartnerVendorID = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 10), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(3, 3), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerVendorID.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerVendorID.setDescription("This value identifies the IEEE-assigned organizationally Unique Identifier (OUI) of the maker of this partner DLSw. This information is obtained from a DLSw capabilities exchange message received from the partner DLSw. A string of zero length is returned before a Capabilities Exchange message is received, or if one is never received. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperPartnerVersionStr = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 253))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerVersionStr.setReference('DLSW: Switch-to-Switch Protocol RFC 1795')
if mibBuilder.loadTexts: dlswTConnOperPartnerVersionStr.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerVersionStr.setDescription("This value identifies the particular product version (e.g., product name, code level, fix level) of this partner DLSw. The format of the actual version string is vendor-specific. This information is obtained from a DLSw capabilities exchange message received from the partner DLSw. A string of zero length is returned before a Capabilities Exchange message is received, if one is never received, or if one is received but it does not contain a version string. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperPartnerInitPacingWndw = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerInitPacingWndw.setReference('DLSW: Switch-to-Switch Protocol RFC 1795')
if mibBuilder.loadTexts: dlswTConnOperPartnerInitPacingWndw.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerInitPacingWndw.setDescription("The value of the partner initial receive pacing window. This is our initial send pacing window for all new circuits on this transport connection, as modified and granted by the first flow control indication the partner sends on each circuit. This information is obtained from a DLSw capabilities exchange message received from the partner DLSw. A value of zero is returned before a Capabilities Exchange message is received, or if one is never received. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperPartnerSapList = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 13), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(16, 16), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerSapList.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerSapList.setDescription("The Supported SAP List received in the capabilities exchange message from the partner DLSw. This list has the same format described for dlswTConnConfigSapList. A string of zero length is returned before a Capabilities Exchange message is received, or if one is never received. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperPartnerNBExcl = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 14), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerNBExcl.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerNBExcl.setDescription("The value of true signifies that the NetBIOS names received from this partner in the NetBIOS name list in its capabilities exchange message are the only NetBIOS names reachable by that partner. `False' indicates that other NetBIOS names may be reachable. `False' should be returned before a Capabilities Exchange message is received, if one is never received, or if one is received without a NB Name Exclusivity CV. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperPartnerMacExcl = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 15), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerMacExcl.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerMacExcl.setDescription("The value of true signifies that the MAC addresses received from this partner in the MAC address list in its capabilities exchange message are the only MAC addresses reachable by that partner. `False' indicates that other MAC addresses may be reachable. `False' should be returned before a Capabilities Exchange message is received, if one is never received, or if one is received without a MAC Address Exclusivity CV. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperPartnerNBInfo = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("partial", 2), ("complete", 3), ("notApplicable", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerNBInfo.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerNBInfo.setDescription("It is up to this DLSw whether to keep either none, some, or all of the NetBIOS name list that was received in the capabilities exchange message sent by this partner DLSw. This object identifies how much information was kept by this DLSw. These names are stored as userConfigured remote entries in dlswDirNBTable. A value of (4), notApplicable, should be returned before a Capabilities Exchange message is received, or if one is never received. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperPartnerMacInfo = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("partial", 2), ("complete", 3), ("notApplicable", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperPartnerMacInfo.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperPartnerMacInfo.setDescription("It is up to this DLSw whether to keep either none, some, or all of the MAC address list that was received in the capabilities exchange message sent by this partner DLSw. This object identifies how much information was kept by this DLSw. These names are stored as userConfigured remote entries in dlswDirMACTable. A value of (4), notApplicable, should be returned before a Capabilities Exchange message is received, or if one is never received. If an implementation chooses to keep dlswTConnOperEntrys in the `disconnected' state, this value should remain unchanged.")
dlswTConnOperDiscTime = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 18), TimeTicks()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperDiscTime.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperDiscTime.setDescription("The amount of time (in hundredths of a second) since the dlswTConnOperState last entered `disconnected' state.")
dlswTConnOperDiscReason = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("other", 1), ("capExFailed", 2), ("transportLayerDisc", 3), ("operatorCommand", 4), ("lastCircuitDiscd", 5), ("protocolError", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperDiscReason.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperDiscReason.setDescription('This object signifies the reason that either prevented the transport connection from entering the connected state, or caused the transport connection to enter the disconnected state.')
dlswTConnOperDiscActiveCir = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperDiscActiveCir.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperDiscActiveCir.setDescription('The number of circuits active (not in DISCONNECTED state) at the time the transport connection was last disconnected. This value is zero if the transport connection has never been connected.')
dlswTConnOperInDataPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 21), Counter32()).setUnits('SSP messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperInDataPkts.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperInDataPkts.setDescription('The number of Switch-to-Switch Protocol (SSP) messages of type DGRMFRAME, DATAFRAME, or INFOFRAME received on this transport connection.')
dlswTConnOperOutDataPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 22), Counter32()).setUnits('SSP messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperOutDataPkts.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperOutDataPkts.setDescription('The number of Switch-to-Switch Protocol (SSP) messages of type DGRMFRAME, DATAFRAME, or INFOFRAME transmitted on this transport connection.')
dlswTConnOperInDataOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 23), Counter32()).setUnits('octets').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperInDataOctets.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperInDataOctets.setDescription('The number of octets in Switch-to-Switch Protocol (SSP) messages of type DGRMFRAME, DATAFRAME, or INFOFRAME received on this transport connection. Each message is counted starting with the first octet following the SSP message header.')
dlswTConnOperOutDataOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 24), Counter32()).setUnits('octets').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperOutDataOctets.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperOutDataOctets.setDescription('The number of octets in Switch-to-Switch Protocol (SSP) messages of type DGRMFRAME, DATAFRAME, or INFOFRAME transmitted on this transport connection. Each message is counted starting with the first octet following the SSP message header.')
dlswTConnOperInCntlPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 25), Counter32()).setUnits('SSP messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperInCntlPkts.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperInCntlPkts.setDescription('The number of Switch-to-Switch Protocol (SSP) messages received on this transport connection which were not of type DGRMFRAME, DATAFRAME, or INFOFRAME.')
dlswTConnOperOutCntlPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 26), Counter32()).setUnits('SSP messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperOutCntlPkts.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperOutCntlPkts.setDescription('The number of Switch-to-Switch Protocol (SSP) messages transmitted on this transport connection which were not of type DGRMFRAME, DATAFRAME, or INFOFRAME.')
dlswTConnOperCURexSents = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperCURexSents.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperCURexSents.setDescription('The number of CanUReach_ex messages sent on this transport connection.')
dlswTConnOperICRexRcvds = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperICRexRcvds.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperICRexRcvds.setDescription('The number of ICanReach_ex messages received on this transport connection.')
dlswTConnOperCURexRcvds = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 29), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperCURexRcvds.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperCURexRcvds.setDescription('The number of CanUReach_ex messages received on this transport connection.')
dlswTConnOperICRexSents = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 30), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperICRexSents.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperICRexSents.setDescription('The number of ICanReach_ex messages sent on this transport connection.')
dlswTConnOperNQexSents = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 31), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperNQexSents.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperNQexSents.setDescription('The number of NetBIOS_NQ_ex (NetBIOS Name Query-explorer) messages sent on this transport connection.')
dlswTConnOperNRexRcvds = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 32), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperNRexRcvds.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperNRexRcvds.setDescription('The number of NETBIOS_NR_ex (NetBIOS Name Recognized-explorer) messages received on this transport connection.')
dlswTConnOperNQexRcvds = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 33), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperNQexRcvds.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperNQexRcvds.setDescription('The number of NETBIOS_NQ_ex messages received on this transport connection.')
dlswTConnOperNRexSents = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 34), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperNRexSents.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperNRexSents.setDescription('The number of NETBIOS_NR_ex messages sent on this transport connection.')
dlswTConnOperCirCreates = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 35), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperCirCreates.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperCirCreates.setDescription("The number of times that circuits entered `circuit_established' state (not counting transitions from `circuit_restart').")
dlswTConnOperCircuits = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 3, 1, 36), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnOperCircuits.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperCircuits.setDescription("The number of currently active circuits on this transport connection, where `active' means not in `disconnected' state.")
dlswTConnSpecific = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 2, 4))
dlswTConnTcp = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1))
dlswTConnTcpConfigTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 1), )
if mibBuilder.loadTexts: dlswTConnTcpConfigTable.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpConfigTable.setDescription('This table defines the TCP transport connections that will be either initiated by or accepted by this DLSw. It augments the entries in dlswTConnConfigTable whose domain is dlswTCPDomain.')
dlswTConnTcpConfigEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 1, 1), ).setIndexNames((0, "DLSW-MIB", "dlswTConnConfigIndex"))
if mibBuilder.loadTexts: dlswTConnTcpConfigEntry.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpConfigEntry.setDescription('Each conceptual row defines parameters that are specific to dlswTCPDomain transport connections.')
dlswTConnTcpConfigKeepAliveInt = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1800))).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnTcpConfigKeepAliveInt.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpConfigKeepAliveInt.setDescription('The time in seconds between TCP keepAlive messages when no traffic is flowing. Zero signifies no keepAlive protocol. Changes take effect only for new TCP connections.')
dlswTConnTcpConfigTcpConnections = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(2)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnTcpConfigTcpConnections.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpConfigTcpConnections.setDescription('This is our preferred number of TCP connections within a TCP transport connection. The actual number used is negotiated at capabilities exchange time. Changes take effect only for new transport connections.')
dlswTConnTcpConfigMaxSegmentSize = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(4096)).setUnits('packets').setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswTConnTcpConfigMaxSegmentSize.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpConfigMaxSegmentSize.setDescription('This is the number of bytes that this node is willing to receive over the read TCP connection(s). Changes take effect for new transport connections.')
dlswTConnTcpOperTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 2), )
if mibBuilder.loadTexts: dlswTConnTcpOperTable.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpOperTable.setDescription('A list of TCP transport connections. It is optional but desirable for the agent to keep an entry for some period of time after the transport connection is disconnected. This allows the manager to capture additional useful information about the connection, in particular, statistical information and the cause of the disconnection.')
dlswTConnTcpOperEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 2, 1), ).setIndexNames((0, "DLSW-MIB", "dlswTConnOperTDomain"), (0, "DLSW-MIB", "dlswTConnOperRemoteTAddr"))
if mibBuilder.loadTexts: dlswTConnTcpOperEntry.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpOperEntry.setDescription('')
dlswTConnTcpOperKeepAliveInt = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1800))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnTcpOperKeepAliveInt.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpOperKeepAliveInt.setDescription('The time in seconds between TCP keepAlive messages when no traffic is flowing. Zero signifies no keepAlive protocol is operating.')
dlswTConnTcpOperPrefTcpConnections = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnTcpOperPrefTcpConnections.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpOperPrefTcpConnections.setDescription('This is the number of TCP connections preferred by this DLSw partner, as received in its capabilities exchange message.')
dlswTConnTcpOperTcpConnections = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 2, 4, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswTConnTcpOperTcpConnections.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpOperTcpConnections.setDescription('This is the actual current number of TCP connections within this transport connection.')
dlswIfTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 3, 1), )
if mibBuilder.loadTexts: dlswIfTable.setStatus('current')
if mibBuilder.loadTexts: dlswIfTable.setDescription('The list of interfaces on which DLSw is active.')
dlswIfEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 3, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: dlswIfEntry.setStatus('current')
if mibBuilder.loadTexts: dlswIfEntry.setDescription('')
dlswIfRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 3, 1, 1, 1), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswIfRowStatus.setStatus('current')
if mibBuilder.loadTexts: dlswIfRowStatus.setDescription('This object is used by the manager to create or delete the row entry in the dlswIfTable following the RowStatus textual convention.')
dlswIfVirtualSegment = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 4095), ValueRangeConstraint(65535, 65535), )).clone(65535)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswIfVirtualSegment.setStatus('current')
if mibBuilder.loadTexts: dlswIfVirtualSegment.setDescription('The segment number that uniquely identifies the virtual segment to which this DLSw interface is connected. Current source routing protocols limit this value to the range 0 - 4095. (The value 0 is used by some management applications for special test cases.) A value of 65535 signifies that no virtual segment is assigned to this interface. For instance, in a non-source routing environment, segment number assignment is not required.')
dlswIfSapList = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 3, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(16, 16)).setFixedLength(16).clone(hexValue="AA000000000000000000000000000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswIfSapList.setStatus('current')
if mibBuilder.loadTexts: dlswIfSapList.setDescription('The SAP list indicates which SAPs are allowed to be data link switched through this interface. This list has the same format described for dlswTConnConfigSapList. When changes to this object take effect is implementation-specific. Turning off a particular SAP can destroy active circuits that are using that SAP. An agent implementation may reject such changes until there are no active circuits if it so chooses. In this case, it is up to the manager to close the circuits first, using dlswCircuitState. The DEFVAL below indicates support for SAPs 0, 4, 8, and C.')
dlswDirStat = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 4, 1))
dlswDirMacEntries = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirMacEntries.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacEntries.setDescription('The current total number of entries in the dlswDirMacTable.')
dlswDirMacCacheHits = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirMacCacheHits.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacCacheHits.setDescription('The number of times a cache search for a particular MAC address resulted in success.')
dlswDirMacCacheMisses = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirMacCacheMisses.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacCacheMisses.setDescription('The number of times a cache search for a particular MAC address resulted in failure.')
dlswDirMacCacheNextIndex = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirMacCacheNextIndex.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacCacheNextIndex.setDescription('The next value of dlswDirMacIndex to be assigned by the agent. A retrieval of this object atomically reserves the returned value for use by the manager to create a row in dlswDirMacTable. This makes it possible for the agent to control the index space of the MAC address cache, yet allows the manager to administratively create new rows.')
dlswDirNBEntries = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirNBEntries.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBEntries.setDescription('The current total number of entries in the dlswDirNBTable.')
dlswDirNBCacheHits = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirNBCacheHits.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBCacheHits.setDescription('The number of times a cache search for a particular NetBIOS name resulted in success.')
dlswDirNBCacheMisses = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirNBCacheMisses.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBCacheMisses.setDescription('The number of times a cache search for a particular NetBIOS name resulted in failure.')
dlswDirNBCacheNextIndex = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 4, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirNBCacheNextIndex.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBCacheNextIndex.setDescription('The next value of dlswDirNBIndex to be assigned by the agent. A retrieval of this object atomically reserves the returned value for use by the manager to create a row in dlswDirNBTable. This makes it possible for the agent to control the index space for the NetBIOS name cache, yet allows the manager to administratively create new rows.')
dlswDirCache = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 4, 2))
dlswDirMacTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1), )
if mibBuilder.loadTexts: dlswDirMacTable.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacTable.setDescription('This table contains locations of MAC addresses. They could be either verified or not verified, local or remote, and configured locally or learned from either Capabilities Exchange messages or directory searches.')
dlswDirMacEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1), ).setIndexNames((0, "DLSW-MIB", "dlswDirMacIndex"))
if mibBuilder.loadTexts: dlswDirMacEntry.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacEntry.setDescription('Indexed by dlswDirMacIndex.')
dlswDirMacIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: dlswDirMacIndex.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacIndex.setDescription('Uniquely identifies a conceptual row of this table.')
dlswDirMacMac = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 2), MacAddressNC()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacMac.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacMac.setDescription('The MAC address, together with the dlswDirMacMask, specifies a set of MAC addresses that are defined or discovered through an interface or partner DLSw nodes.')
dlswDirMacMask = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 3), MacAddressNC().clone(hexValue="FFFFFFFFFFFF")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacMask.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacMask.setDescription('The MAC address mask, together with the dlswDirMacMac, specifies a set of MAC addresses that are defined or discovered through an interface or partner DLSw nodes.')
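# --- Illustrative helper (not part of the pysmi-generated MIB definitions) ---
# A hedged sketch of how dlswDirMacMac and dlswDirMacMask are assumed to
# define a set of MAC addresses: a candidate address is taken to belong to
# the set when it equals dlswDirMacMac on every bit position where the mask
# is one. Under that assumption, the all-ones DEFVAL mask above matches
# exactly one individual MAC address.
def _dlswDirMacMatches(candidateMac, dirMac, dirMask):
    """Return True if candidateMac falls in the set defined by (dirMac, dirMask)."""
    candidate = bytearray(candidateMac)
    mac = bytearray(dirMac)
    mask = bytearray(dirMask)
    if not (len(candidate) == len(mac) == len(mask) == 6):
        return False
    return all((c & m) == (a & m) for c, a, m in zip(candidate, mac, mask))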
dlswDirMacEntryType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("userConfiguredPublic", 2), ("userConfiguredPrivate", 3), ("partnerCapExMsg", 4), ("dynamic", 5))).clone('userConfiguredPublic')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacEntryType.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacEntryType.setDescription('The cause of the creation of this conceptual row. It could be one of the three methods: (1) user configured, including via management protocol set operations, configuration file, command line or equivalent methods; (2) learned from the partner DLSw Capabilities Exchange messages; and (3) dynamic, e.g., learned from ICanReach messages, or LAN explorer frames. Since only individual MAC addresses can be dynamically learned, dynamic entries will all have a mask of all FFs. The public versus private distinction for user-configured resources applies only to local resources (UC remote resources are private), and indicates whether that resource should be advertised in capabilities exchange messages sent by this node.')
dlswDirMacLocationType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("local", 2), ("remote", 3))).clone('local')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacLocationType.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacLocationType.setDescription('The location of the resource (or a collection of resources using a mask) of this conceptual row is either (1) local - the resource is reachable via an interface, or (2) remote - the resource is reachable via a partner DLSw node (or a set of partner DLSw nodes).')
dlswDirMacLocation = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 6), RowPointer().clone((0, 0))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacLocation.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacLocation.setDescription('Points to either the ifEntry, dlswTConnConfigEntry, dlswTConnOperEntry, 0.0, or something that is implementation specific. It identifies the location of the MAC address (or the collection of MAC addresses.)')
dlswDirMacStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("reachable", 2), ("notReachable", 3))).clone('unknown')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacStatus.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacStatus.setDescription("This object specifies whether DLSw currently believes the MAC address to be accessible at the specified location. The value `notReachable' allows a configured resource definition to be taken out of service when a search to that resource fails (avoiding a repeat of the search).")
dlswDirMacLFSize = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 8), LFSize().clone('lfs65535')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacLFSize.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacLFSize.setDescription('The largest size of the MAC INFO field (LLC header and data) that a circuit to the MAC address can carry through this path.')
dlswDirMacRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 1, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirMacRowStatus.setStatus('current')
if mibBuilder.loadTexts: dlswDirMacRowStatus.setDescription('This object is used by the manager to create or delete the row entry in the dlswDirMacTable following the RowStatus textual convention.')
dlswDirNBTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2), )
if mibBuilder.loadTexts: dlswDirNBTable.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBTable.setDescription('This table contains locations of NetBIOS names. They could be either verified or not verified, local or remote, and configured locally or learned from either Capabilities Exchange messages or directory searches.')
dlswDirNBEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1), ).setIndexNames((0, "DLSW-MIB", "dlswDirNBIndex"))
if mibBuilder.loadTexts: dlswDirNBEntry.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBEntry.setDescription('Indexed by dlswDirNBIndex.')
dlswDirNBIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: dlswDirNBIndex.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBIndex.setDescription('Uniquely identifies a conceptual row of this table.')
dlswDirNBName = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 2), NBName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBName.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBName.setDescription("The NetBIOS name (including `any char' and `wildcard' characters) specifies a set of NetBIOS names that are defined or discovered through an interface or partner DLSw nodes.")
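# --- Illustrative helper (not part of the pysmi-generated MIB definitions) ---
# A hedged sketch of matching a fully-specified NetBIOS name against a
# dlswDirNBName entry. It assumes the common NBName interpretation in which
# '?' stands for any single character and a trailing '*' stands for any
# remaining characters; consult the NBName textual convention defined earlier
# in this MIB for the authoritative rules.
def _dlswDirNBNameMatches(candidateName, dirName):
    """Return True if candidateName is covered by the (possibly wildcarded) dirName."""
    if dirName.endswith('*'):
        prefix = dirName[:-1]
        candidateName = candidateName[:len(prefix)]
        dirName = prefix
    if len(candidateName) != len(dirName):
        return False
    return all(d == '?' or d == c for c, d in zip(candidateName, dirName))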
dlswDirNBNameType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("individual", 2), ("group", 3))).clone('unknown')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBNameType.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBNameType.setDescription('Whether dlswDirNBName represents an (or a set of) individual or group NetBIOS name(s).')
dlswDirNBEntryType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("userConfiguredPublic", 2), ("userConfiguredPrivate", 3), ("partnerCapExMsg", 4), ("dynamic", 5))).clone('userConfiguredPublic')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBEntryType.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBEntryType.setDescription('The cause of the creation of this conceptual row. It could be one of the three methods: (1) user configured, including via management protocol set operations, configuration file, command line, or equivalent methods; (2) learned from the partner DLSw Capabilities Exchange messages; and (3) dynamic, e.g., learned from ICanReach messages, or test frames. Since only actual NetBIOS names can be dynamically learned, dynamic entries will not contain any char or wildcard characters. The public versus private distinction for user-configured resources applies only to local resources (UC remote resources are private), and indicates whether that resource should be advertised in capabilities exchange messages sent by this node.')
dlswDirNBLocationType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("local", 2), ("remote", 3))).clone('local')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBLocationType.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBLocationType.setDescription('The location of the resource (or a collection of resources using any char/wildcard characters) of this conceptual row is either (1) local - the resource is reachable via an interface, or (2) remote - the resource is reachable via a partner DLSw node (or a set of partner DLSw nodes).')
dlswDirNBLocation = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 6), RowPointer().clone((0, 0))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBLocation.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBLocation.setDescription('Points to either the ifEntry, dlswTConnConfigEntry, dlswTConnOperEntry, 0.0, or something that is implementation specific. It identifies the location of the NetBIOS name or the set of NetBIOS names.')
dlswDirNBStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("reachable", 2), ("notReachable", 3))).clone('unknown')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBStatus.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBStatus.setDescription("This object specifies whether DLSw currently believes the NetBIOS name to be accessible at the specified location. The value `notReachable' allows a configured resource definition to be taken out of service when a search to that resource fails (avoiding a repeat of the search).")
dlswDirNBLFSize = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 8), LFSize().clone('lfs65535')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBLFSize.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBLFSize.setDescription('The largest size of the MAC INFO field (LLC header and data) that a circuit to the NB name can carry through this path.')
dlswDirNBRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 2, 2, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswDirNBRowStatus.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBRowStatus.setDescription('This object is used by the manager to create or delete the row entry in the dlswDirNBTable following the RowStatus textual convention.')
dlswDirLocate = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 4, 3))
dlswDirLocateMacTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 1), )
if mibBuilder.loadTexts: dlswDirLocateMacTable.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateMacTable.setDescription('This table is used to retrieve all entries in the dlswDirMacTable that match a given MAC address, in the order of the best matched first, the second best matched second, and so on, till no more entries match the given MAC address.')
dlswDirLocateMacEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 1, 1), ).setIndexNames((0, "DLSW-MIB", "dlswDirLocateMacMac"), (0, "DLSW-MIB", "dlswDirLocateMacMatch"))
if mibBuilder.loadTexts: dlswDirLocateMacEntry.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateMacEntry.setDescription('Indexed by dlswDirLocateMacMac and dlswDirLocateMacMatch. The first object is the MAC address of interest, and the second object is the order in the list of all entries that match the MAC address.')
dlswDirLocateMacMac = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 1, 1, 1), MacAddressNC())
if mibBuilder.loadTexts: dlswDirLocateMacMac.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateMacMac.setDescription('The MAC address to be located.')
dlswDirLocateMacMatch = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)))
if mibBuilder.loadTexts: dlswDirLocateMacMatch.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateMacMatch.setDescription('The order of the entries of dlswDirMacTable that match dlswDirLocateMacMac. A value of one represents the entry that best matches the MAC address. A value of two represents the second best matched entry, and so on.')
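# --- Illustrative helper (not part of the pysmi-generated MIB definitions) ---
# A hedged sketch of one plausible way an agent could rank dlswDirMacTable
# entries for dlswDirLocateMacMatch: entries whose (mac, mask) pair covers
# the target address are ordered from the most specific mask (most one bits)
# to the least specific. The actual ranking is agent-specific; this only
# illustrates the "best match first" ordering described above, reusing the
# _dlswDirMacMatches sketch defined earlier in this file.
def _dlswRankMacMatches(targetMac, entries):
    """entries: iterable of (mac, mask) pairs; returns them best match first."""
    covering = [(mac, mask) for (mac, mask) in entries
                if _dlswDirMacMatches(targetMac, mac, mask)]
    return sorted(covering,
                  key=lambda pair: sum(bin(b).count('1') for b in bytearray(pair[1])),
                  reverse=True)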
dlswDirLocateMacLocation = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 1, 1, 3), RowPointer()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirLocateMacLocation.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateMacLocation.setDescription('Points to the dlswDirMacEntry.')
dlswDirLocateNBTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 2), )
if mibBuilder.loadTexts: dlswDirLocateNBTable.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateNBTable.setDescription('This table is used to retrieve all entries in the dlswDirNBTable that match a given NetBIOS name, in the order of the best matched first, the second best matched second, and so on, till no more entries match the given NetBIOS name.')
dlswDirLocateNBEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 2, 1), ).setIndexNames((0, "DLSW-MIB", "dlswDirLocateNBName"), (0, "DLSW-MIB", "dlswDirLocateNBMatch"))
if mibBuilder.loadTexts: dlswDirLocateNBEntry.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateNBEntry.setDescription('Indexed by dlswDirLocateNBName and dlswDirLocateNBMatch. The first object is the NetBIOS name of interest, and the second object is the order in the list of all entries that match the NetBIOS name.')
dlswDirLocateNBName = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 2, 1, 1), NBName())
if mibBuilder.loadTexts: dlswDirLocateNBName.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateNBName.setDescription('The NetBIOS name to be located (no any char or wildcards).')
dlswDirLocateNBMatch = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)))
if mibBuilder.loadTexts: dlswDirLocateNBMatch.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateNBMatch.setDescription('The order of the entries of dlswDirNBTable that match dlswDirLocateNBName. A value of one represents the entry that best matches the NetBIOS name. A value of two represents the second best matched entry, and so on.')
dlswDirLocateNBLocation = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 4, 3, 2, 1, 3), RowPointer()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswDirLocateNBLocation.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateNBLocation.setDescription('Points to the dlswDirNBEntry.')
dlswCircuitStat = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 5, 1))
dlswCircuitStatActives = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 5, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitStatActives.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitStatActives.setDescription('The current number of circuits in dlswCircuitTable that are not in the disconnected state.')
dlswCircuitStatCreates = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 5, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitStatCreates.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitStatCreates.setDescription("The total number of entries ever added to dlswCircuitTable, or reactivated upon exiting `disconnected' state.")
dlswCircuitTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 5, 2), )
if mibBuilder.loadTexts: dlswCircuitTable.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitTable.setDescription('This table is the circuit representation in the DLSw entity. Virtual data links are used to represent any internal end stations. There is a conceptual row associated with each data link. Thus, for circuits without an intervening transport connection, there are two conceptual rows for each circuit. The table consists of the circuits being established, established, and as an implementation option, circuits that have been disconnected. For circuits carried over transport connections, an entry is created after the CUR_cs was sent or received. For circuits between two locally attached devices, or internal virtual MAC addresses, an entry is created when the equivalent of CUR_cs sent/received status is reached. End station 1 (S1) and End station 2 (S2) are used to represent the two end stations of the circuit. S1 is always an end station which is locally attached. S2 may be locally attached or remote. If it is locally attached, the circuit will be represented by two rows indexed by (A, B) and (B, A) where A & B are the relevant MACs/SAPs. The table may be used to store the causes of disconnection of circuits. It is recommended that the oldest disconnected circuit entry be removed from this table when the memory space of disconnected circuits is needed.')
dlswCircuitEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1), ).setIndexNames((0, "DLSW-MIB", "dlswCircuitS1Mac"), (0, "DLSW-MIB", "dlswCircuitS1Sap"), (0, "DLSW-MIB", "dlswCircuitS2Mac"), (0, "DLSW-MIB", "dlswCircuitS2Sap"))
if mibBuilder.loadTexts: dlswCircuitEntry.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitEntry.setDescription('')
dlswCircuitS1Mac = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 1), MacAddressNC())
if mibBuilder.loadTexts: dlswCircuitS1Mac.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS1Mac.setDescription('The MAC Address of End Station 1 (S1) used for this circuit.')
dlswCircuitS1Sap = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1))
if mibBuilder.loadTexts: dlswCircuitS1Sap.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS1Sap.setDescription('The SAP at End Station 1 (S1) used for this circuit.')
dlswCircuitS1IfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS1IfIndex.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS1IfIndex.setDescription('The ifEntry index of the local interface through which S1 can be reached.')
dlswCircuitS1DlcType = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 4), DlcType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS1DlcType.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS1DlcType.setDescription('The DLC protocol in use between the DLSw node and S1.')
dlswCircuitS1RouteInfo = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS1RouteInfo.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS1RouteInfo.setDescription('If source-route bridging is in use between the DLSw node and S1, this is the routing information field describing the path between the two devices. Otherwise the value will be an OCTET STRING of zero length.')
dlswCircuitS1CircuitId = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 6), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(8, 8), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS1CircuitId.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS1CircuitId.setDescription('The Circuit ID assigned by this DLSw node to this circuit. The first four octets are the DLC port Id, and the second four octets are the Data Link Correlator. If the DLSw SSP was not used to establish this circuit, the value will be a string of zero length.')
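# --- Illustrative helper (not part of the pysmi-generated MIB definitions) ---
# A minimal sketch of splitting the eight-octet circuit ID described above
# into its two components: the first four octets are the DLC port Id and the
# last four are the Data Link Correlator. A zero-length value means the
# circuit was not established through DLSw SSP.
def _dlswParseCircuitId(circuitIdOctets):
    """Return (dlcPortId, dataLinkCorrelator) as byte strings, or None."""
    octets = bytes(bytearray(circuitIdOctets))
    if len(octets) != 8:
        return None
    return octets[:4], octets[4:]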
dlswCircuitS1Dlc = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 7), RowPointer()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS1Dlc.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS1Dlc.setDescription('Points to a conceptual row of the underlying DLC MIB, which could either be the standard MIBs (e.g., the SDLC), or an enterprise-specific DLC MIB.')
dlswCircuitS2Mac = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 8), MacAddressNC())
if mibBuilder.loadTexts: dlswCircuitS2Mac.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS2Mac.setDescription('The MAC Address of End Station 2 (S2) used for this circuit.')
dlswCircuitS2Sap = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1))
if mibBuilder.loadTexts: dlswCircuitS2Sap.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS2Sap.setDescription('The SAP at End Station 2 (S2) used for this circuit.')
dlswCircuitS2Location = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 10), EndStationLocation()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS2Location.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS2Location.setDescription('The location of End Station 2 (S2). If the location of End Station 2 is local, the interface information will be available in the conceptual row whose S1 and S2 are the S2 and the S1 of this conceptual row, respectively.')
dlswCircuitS2TDomain = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 11), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS2TDomain.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS2TDomain.setDescription('If the location of End Station 2 is remote, this value is the transport domain of the transport protocol the circuit is running over. Otherwise, the value is 0.0.')
dlswCircuitS2TAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 12), TAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS2TAddress.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS2TAddress.setDescription('If the location of End Station 2 is remote, this object contains the address of the partner DLSw, else it will be an OCTET STRING of zero length.')
dlswCircuitS2CircuitId = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 13), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(8, 8), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitS2CircuitId.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitS2CircuitId.setDescription('The Circuit ID assigned to this circuit by the partner DLSw node. The first four octets are the DLC port Id, and the second four octets are the Data Link Correlator. If the DLSw SSP was not used to establish this circuit, the value will be a string of zero length.')
dlswCircuitOrigin = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("s1", 1), ("s2", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitOrigin.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitOrigin.setDescription('This object specifies which of the two end stations initiated the establishment of this circuit.')
dlswCircuitEntryTime = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 15), TimeTicks()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitEntryTime.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitEntryTime.setDescription('The amount of time (in hundredths of a second) since this circuit table conceptual row was created.')
dlswCircuitStateTime = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 16), TimeTicks()).setUnits('hundredths of a second').setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitStateTime.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitStateTime.setDescription('The amount of time (in hundredths of a second) since this circuit entered the current state.')
dlswCircuitState = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13))).clone(namedValues=NamedValues(("disconnected", 1), ("circuitStart", 2), ("resolvePending", 3), ("circuitPending", 4), ("circuitEstablished", 5), ("connectPending", 6), ("contactPending", 7), ("connected", 8), ("disconnectPending", 9), ("haltPending", 10), ("haltPendingNoack", 11), ("circuitRestart", 12), ("restartPending", 13)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswCircuitState.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitState.setDescription("The current state of this circuit. The agent, implementation specific, may choose to keep entries for some period of time after circuit disconnect, so the manager can gather the time and cause of disconnection. While all of the specified values may be returned from a GET operation, the only SETable value is `disconnectPending'. When this value is set, DLSw should perform the appropriate action given its previous state (e.g., send HALT_DL if the state was `connected') to bring the circuit down to the `disconnected' state. Both the partner DLSw and local end station(s) should be notified as appropriate. This MIB provides no facility to re-establish a disconnected circuit, because in DLSw this should be an end station-driven function.")
dlswCircuitPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unsupported", 1), ("low", 2), ("medium", 3), ("high", 4), ("highest", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitPriority.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitPriority.setDescription("The transmission priority of this circuit as understood by this DLSw node. This value is determined by the two DLSw nodes at circuit startup time. If this DLSw node does not support DLSw circuit priority, the value `unsupported' should be returned.")
dlswCircuitFCSendGrantedUnits = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCSendGrantedUnits.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCSendGrantedUnits.setDescription('The number of paced SSP messages that this DLSw is currently authorized to send on this circuit before it must stop and wait for an additional flow control indication from the partner DLSw. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
dlswCircuitFCSendCurrentWndw = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCSendCurrentWndw.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCSendCurrentWndw.setDescription("The current window size that this DLSw is using in its role as a data sender. This is the value by which this DLSw would increase the number of messages it is authorized to send, if it were to receive a flow control indication with the bits specifying `repeat window'. The value zero should be returned if this circuit is not running the DLSw pacing protocol.")
dlswCircuitFCRecvGrantedUnits = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCRecvGrantedUnits.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCRecvGrantedUnits.setDescription('The current number of paced SSP messages that this DLSw has authorized the partner DLSw to send on this circuit before the partner DLSw must stop and wait for an additional flow control indication from this DLSw. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
dlswCircuitFCRecvCurrentWndw = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 22), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCRecvCurrentWndw.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCRecvCurrentWndw.setDescription("The current window size that this DLSw is using in its role as a data receiver. This is the number of additional paced SSP messages that this DLSw would be authorizing its DLSw partner to send, if this DLSw were to send a flow control indication with the bits specifying `repeat window'. The value zero should be returned if this circuit is not running the DLSw pacing protocol.")
dlswCircuitFCLargestRecvGranted = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 23), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCLargestRecvGranted.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCLargestRecvGranted.setDescription('The largest receive window size granted by this DLSw during the current activation of this circuit. This is not the largest number of messages granted at any time, but the largest window size as represented by FCIND operator bits. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
dlswCircuitFCLargestSendGranted = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 24), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCLargestSendGranted.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCLargestSendGranted.setDescription('The largest send (with respect to this DLSw) window size granted by the partner DLSw during the current activation of this circuit. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
dlswCircuitFCHalveWndwSents = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCHalveWndwSents.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCHalveWndwSents.setDescription('The number of Halve Window operations this DLSw has sent on this circuit, in its role as a data receiver. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
dlswCircuitFCResetOpSents = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCResetOpSents.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCResetOpSents.setDescription('The number of Reset Window operations this DLSw has sent on this circuit, in its role as a data receiver. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
dlswCircuitFCHalveWndwRcvds = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCHalveWndwRcvds.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCHalveWndwRcvds.setDescription('The number of Halve Window operations this DLSw has received on this circuit, in its role as a data sender. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
dlswCircuitFCResetOpRcvds = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitFCResetOpRcvds.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitFCResetOpRcvds.setDescription('The number of Reset Window operations this DLSw has received on this circuit, in its role as a data sender. The value zero should be returned if this circuit is not running the DLSw pacing protocol.')
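# Illustration only, not part of the generated DLSW-MIB module: a minimal sketch of how
# the sender-side pacing objects described above relate to each other. The update rules
# below are assumptions inferred from the DESCRIPTION text (a "repeat window" indication
# re-grants one current window; a "halve window" operation shrinks the window); they are
# not a normative statement of the DLSw pacing protocol.
def _pacingSketch(grantedUnits, currentWindow, indication):
    """Return updated (grantedUnits, currentWindow) for a DLSw data sender (sketch)."""
    if indication == 'repeat-window':
        # Mirrors dlswCircuitFCSendCurrentWndw: the grant grows by one current window.
        return grantedUnits + currentWindow, currentWindow
    if indication == 'halve-window':
        # Mirrors dlswCircuitFCHalveWndwRcvds: the sender's window is assumed to halve.
        return grantedUnits, max(1, currentWindow // 2)
    return grantedUnits, currentWindow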
dlswCircuitDiscReasonLocal = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("endStationDiscRcvd", 1), ("endStationDlcError", 2), ("protocolError", 3), ("operatorCommand", 4), ("haltDlRcvd", 5), ("haltDlNoAckRcvd", 6), ("transportConnClosed", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitDiscReasonLocal.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitDiscReasonLocal.setDescription('The reason why this circuit was last disconnected, as seen by this DLSw node. This object is present only if the agent keeps circuit table entries around for some period after circuit disconnect.')
dlswCircuitDiscReasonRemote = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("endStationDiscRcvd", 2), ("endStationDlcError", 3), ("protocolError", 4), ("operatorCommand", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitDiscReasonRemote.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitDiscReasonRemote.setDescription("The generic reason code why this circuit was last disconnected, as reported by the DLSw partner in a HALT_DL or HALT_DL_NOACK. If the partner does not send a reason code in these messages, or the DLSw implementation does not report receiving one, the value `unknown' is returned. This object is present only if the agent keeps circuit table entries around for some period after circuit disconnect.")
dlswCircuitDiscReasonRemoteData = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 31), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(4, 4), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswCircuitDiscReasonRemoteData.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitDiscReasonRemoteData.setDescription('Implementation-specific data reported by the DLSw partner in a HALT_DL or HALT_DL_NOACK, to help specify how and why this circuit was last disconnected. If the partner does not send this data in these messages, or the DLSw implementation does not report receiving it, a string of zero length is returned. This object is present only if the agent keeps circuit table entries around for some period after circuit disconnect.')
dlswSdlcLsEntries = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 6, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlswSdlcLsEntries.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsEntries.setDescription('The number of entries in dlswSdlcLsTable.')
dlswSdlcLsTable = MibTable((1, 3, 6, 1, 2, 1, 46, 1, 6, 2), )
if mibBuilder.loadTexts: dlswSdlcLsTable.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsTable.setDescription('The table defines the virtual MAC addresses for those SDLC link stations that participate in data link switching.')
dlswSdlcLsEntry = MibTableRow((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SNA-SDLC-MIB", "sdlcLSAddress"))
if mibBuilder.loadTexts: dlswSdlcLsEntry.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsEntry.setDescription('The index of this table is the ifIndex value for the SDLC port which owns this link station and the poll address of the particular SDLC link station.')
dlswSdlcLsLocalMac = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1, 1), MacAddressNC()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswSdlcLsLocalMac.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsLocalMac.setDescription('The virtual MAC address used to represent the SDLC-attached link station to the rest of the DLSw network.')
dlswSdlcLsLocalSap = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswSdlcLsLocalSap.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsLocalSap.setDescription('The SAP used to represent this link station.')
dlswSdlcLsLocalIdBlock = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1, 3), DisplayString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(3, 3), )).clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswSdlcLsLocalIdBlock.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsLocalIdBlock.setDescription('The block number is the first three digits of the node_id, if available. These 3 hexadecimal digits identify the product.')
dlswSdlcLsLocalIdNum = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1, 4), DisplayString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(5, 5), )).clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswSdlcLsLocalIdNum.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsLocalIdNum.setDescription('The ID number is the last 5 digits of the node_id, if available. These 5 hexadecimal digits are administratively defined and combined with the 3 digit block number form the node_id. This node_id is used to identify the local node and is included in SNA XIDs.')
dlswSdlcLsRemoteMac = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1, 5), MacAddressNC().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswSdlcLsRemoteMac.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsRemoteMac.setDescription('The MAC address to which DLSw should attempt to connect this link station. If this information is not available, a length of zero for this object should be returned.')
dlswSdlcLsRemoteSap = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1, 6), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(1, 1), )).clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswSdlcLsRemoteSap.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsRemoteSap.setDescription('The SAP of the remote station to which this link station should be connected. If this information is not available, a length of zero for this object should be returned.')
dlswSdlcLsRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 46, 1, 6, 2, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dlswSdlcLsRowStatus.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcLsRowStatus.setDescription('This object is used by the manager to create or delete the row entry in the dlswSdlcLsTable following the RowStatus textual convention.')
dlswTrapControl = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 1, 10))
dlswTrapCntlTConnPartnerReject = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 10, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("partial", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswTrapCntlTConnPartnerReject.setStatus('current')
if mibBuilder.loadTexts: dlswTrapCntlTConnPartnerReject.setDescription("Indicates whether the DLSw is permitted to emit partner reject related traps. With the value of `enabled' the DLSw will emit all partner reject related traps. With the value of `disabled' the DLSw will not emit any partner reject related traps. With the value of `partial' the DLSw will only emit partner reject traps for CapEx reject. The changes take effect immediately.")
dlswTrapCntlTConnProtViolation = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 10, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswTrapCntlTConnProtViolation.setStatus('current')
if mibBuilder.loadTexts: dlswTrapCntlTConnProtViolation.setDescription('Indicates whether the DLSw is permitted to generate protocol-violation traps on the events such as window size violation. The changes take effect immediately.')
dlswTrapCntlTConn = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 10, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("partial", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswTrapCntlTConn.setStatus('current')
if mibBuilder.loadTexts: dlswTrapCntlTConn.setDescription("Indicates whether the DLSw is permitted to emit transport connection up and down traps. With the value of `enabled' the DLSw will emit traps when connections enter `connected' and `disconnected' states. With the value of `disabled' the DLSw will not emit traps when connections enter `connected' and `disconnected' states. With the value of `partial' the DLSw will only emit transport connection down traps when the connection is closed with busy. The changes take effect immediately.")
dlswTrapCntlCircuit = MibScalar((1, 3, 6, 1, 2, 1, 46, 1, 1, 10, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("partial", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dlswTrapCntlCircuit.setStatus('current')
if mibBuilder.loadTexts: dlswTrapCntlCircuit.setDescription("Indicates whether the DLSw is permitted to generate circuit up and down traps. With the value of `enabled' the DLSw will emit traps when circuits enter `connected' and `disconnected' states. With the value of `disabled' the DLSw will not emit traps when circuits enter `connected' and `disconnected' states. With the value of `partial' the DLSw will emit traps only for those circuits that are initiated by this DLSw, e.g., originating the CUR_CS message. The changes take effect immediately.")
dlswTraps = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 1, 0))
dlswTrapTConnPartnerReject = NotificationType((1, 3, 6, 1, 2, 1, 46, 1, 0, 1)).setObjects(("DLSW-MIB", "dlswTConnOperTDomain"), ("DLSW-MIB", "dlswTConnOperRemoteTAddr"))
if mibBuilder.loadTexts: dlswTrapTConnPartnerReject.setStatus('current')
if mibBuilder.loadTexts: dlswTrapTConnPartnerReject.setDescription('This trap is sent each time a transport connection is rejected by a partner DLSw during Capabilities Exchanges. The emission of this trap is controlled by dlswTrapCntlTConnPartnerReject.')
dlswTrapTConnProtViolation = NotificationType((1, 3, 6, 1, 2, 1, 46, 1, 0, 2)).setObjects(("DLSW-MIB", "dlswTConnOperTDomain"), ("DLSW-MIB", "dlswTConnOperRemoteTAddr"))
if mibBuilder.loadTexts: dlswTrapTConnProtViolation.setStatus('current')
if mibBuilder.loadTexts: dlswTrapTConnProtViolation.setDescription('This trap is sent each time a protocol violation is detected for a transport connection. The emission of this trap is controlled by dlswTrapCntlTConnProtViolation.')
dlswTrapTConnUp = NotificationType((1, 3, 6, 1, 2, 1, 46, 1, 0, 3)).setObjects(("DLSW-MIB", "dlswTConnOperTDomain"), ("DLSW-MIB", "dlswTConnOperRemoteTAddr"))
if mibBuilder.loadTexts: dlswTrapTConnUp.setStatus('current')
if mibBuilder.loadTexts: dlswTrapTConnUp.setDescription("This trap is sent each time a transport connection enters `connected' state. The emission of this trap is controlled by dlswTrapCntlTConn.")
dlswTrapTConnDown = NotificationType((1, 3, 6, 1, 2, 1, 46, 1, 0, 4)).setObjects(("DLSW-MIB", "dlswTConnOperTDomain"), ("DLSW-MIB", "dlswTConnOperRemoteTAddr"))
if mibBuilder.loadTexts: dlswTrapTConnDown.setStatus('current')
if mibBuilder.loadTexts: dlswTrapTConnDown.setDescription("This trap is sent each time a transport connection enters `disconnected' state. The emission of this trap is controlled by dlswTrapCntlTConn.")
dlswTrapCircuitUp = NotificationType((1, 3, 6, 1, 2, 1, 46, 1, 0, 5)).setObjects(("DLSW-MIB", "dlswCircuitS1Mac"), ("DLSW-MIB", "dlswCircuitS1Sap"), ("DLSW-MIB", "dlswCircuitS2Mac"), ("DLSW-MIB", "dlswCircuitS2Sap"))
if mibBuilder.loadTexts: dlswTrapCircuitUp.setStatus('current')
if mibBuilder.loadTexts: dlswTrapCircuitUp.setDescription("This trap is sent each time a circuit enters `connected' state. The emission of this trap is controlled by dlswTrapCntlCircuit.")
dlswTrapCircuitDown = NotificationType((1, 3, 6, 1, 2, 1, 46, 1, 0, 6)).setObjects(("DLSW-MIB", "dlswCircuitS1Mac"), ("DLSW-MIB", "dlswCircuitS1Sap"), ("DLSW-MIB", "dlswCircuitS2Mac"), ("DLSW-MIB", "dlswCircuitS2Sap"))
if mibBuilder.loadTexts: dlswTrapCircuitDown.setStatus('current')
if mibBuilder.loadTexts: dlswTrapCircuitDown.setDescription("This trap is sent each time a circuit enters `disconnected' state. The emission of this trap is controlled by dlswTrapCntlCircuit.")
dlswConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 3))
dlswCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 3, 1))
dlswGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 46, 3, 2))
dlswCoreCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 46, 3, 1, 1)).setObjects(("DLSW-MIB", "dlswNodeGroup"), ("DLSW-MIB", "dlswTConnStatGroup"), ("DLSW-MIB", "dlswTConnConfigGroup"), ("DLSW-MIB", "dlswTConnOperGroup"), ("DLSW-MIB", "dlswInterfaceGroup"), ("DLSW-MIB", "dlswCircuitGroup"), ("DLSW-MIB", "dlswCircuitStatGroup"), ("DLSW-MIB", "dlswNotificationGroup"), ("DLSW-MIB", "dlswNodeNBGroup"), ("DLSW-MIB", "dlswTConnNBGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswCoreCompliance = dlswCoreCompliance.setStatus('current')
if mibBuilder.loadTexts: dlswCoreCompliance.setDescription('The core compliance statement for all DLSw nodes.')
dlswTConnTcpCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 46, 3, 1, 2)).setObjects(("DLSW-MIB", "dlswTConnTcpConfigGroup"), ("DLSW-MIB", "dlswTConnTcpOperGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswTConnTcpCompliance = dlswTConnTcpCompliance.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpCompliance.setDescription('Compliance for DLSw nodes that use TCP as a transport connection protocol.')
dlswDirCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 46, 3, 1, 3)).setObjects(("DLSW-MIB", "dlswDirGroup"), ("DLSW-MIB", "dlswDirNBGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswDirCompliance = dlswDirCompliance.setStatus('current')
if mibBuilder.loadTexts: dlswDirCompliance.setDescription('Compliance for DLSw nodes that provide a directory function.')
dlswDirLocateCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 46, 3, 1, 4)).setObjects(("DLSW-MIB", "dlswDirLocateGroup"), ("DLSW-MIB", "dlswDirLocateNBGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswDirLocateCompliance = dlswDirLocateCompliance.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateCompliance.setDescription('Compliance for DLSw nodes that provide an ordered list of directory entries for a given resource.')
dlswSdlcCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 46, 3, 1, 5)).setObjects(("DLSW-MIB", "dlswSdlcGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswSdlcCompliance = dlswSdlcCompliance.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcCompliance.setDescription('Compliance for DLSw nodes that support SDLC.')
dlswNodeGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 1)).setObjects(("DLSW-MIB", "dlswNodeVersion"), ("DLSW-MIB", "dlswNodeVendorID"), ("DLSW-MIB", "dlswNodeVersionString"), ("DLSW-MIB", "dlswNodeStdPacingSupport"), ("DLSW-MIB", "dlswNodeStatus"), ("DLSW-MIB", "dlswNodeUpTime"), ("DLSW-MIB", "dlswNodeVirtualSegmentLFSize"), ("DLSW-MIB", "dlswNodeResourceMacExclusivity"), ("DLSW-MIB", "dlswTrapCntlTConnPartnerReject"), ("DLSW-MIB", "dlswTrapCntlTConnProtViolation"), ("DLSW-MIB", "dlswTrapCntlTConn"), ("DLSW-MIB", "dlswTrapCntlCircuit"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswNodeGroup = dlswNodeGroup.setStatus('current')
if mibBuilder.loadTexts: dlswNodeGroup.setDescription('Conformance group for DLSw node general information.')
dlswNodeNBGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 2)).setObjects(("DLSW-MIB", "dlswNodeResourceNBExclusivity"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswNodeNBGroup = dlswNodeNBGroup.setStatus('current')
if mibBuilder.loadTexts: dlswNodeNBGroup.setDescription('Conformance group for DLSw node general information specifically for nodes that support NetBIOS.')
dlswTConnStatGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 3)).setObjects(("DLSW-MIB", "dlswTConnStatActiveConnections"), ("DLSW-MIB", "dlswTConnStatCloseIdles"), ("DLSW-MIB", "dlswTConnStatCloseBusys"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswTConnStatGroup = dlswTConnStatGroup.setStatus('current')
if mibBuilder.loadTexts: dlswTConnStatGroup.setDescription('Conformance group for statistics for transport connections.')
dlswTConnConfigGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 4)).setObjects(("DLSW-MIB", "dlswTConnConfigTDomain"), ("DLSW-MIB", "dlswTConnConfigLocalTAddr"), ("DLSW-MIB", "dlswTConnConfigRemoteTAddr"), ("DLSW-MIB", "dlswTConnConfigLastModifyTime"), ("DLSW-MIB", "dlswTConnConfigEntryType"), ("DLSW-MIB", "dlswTConnConfigGroupDefinition"), ("DLSW-MIB", "dlswTConnConfigSetupType"), ("DLSW-MIB", "dlswTConnConfigSapList"), ("DLSW-MIB", "dlswTConnConfigAdvertiseMacNB"), ("DLSW-MIB", "dlswTConnConfigInitCirRecvWndw"), ("DLSW-MIB", "dlswTConnConfigOpens"), ("DLSW-MIB", "dlswTConnConfigRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswTConnConfigGroup = dlswTConnConfigGroup.setStatus('current')
if mibBuilder.loadTexts: dlswTConnConfigGroup.setDescription('Conformance group for the configuration of transport connections.')
dlswTConnOperGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 5)).setObjects(("DLSW-MIB", "dlswTConnOperLocalTAddr"), ("DLSW-MIB", "dlswTConnOperEntryTime"), ("DLSW-MIB", "dlswTConnOperConnectTime"), ("DLSW-MIB", "dlswTConnOperState"), ("DLSW-MIB", "dlswTConnOperConfigIndex"), ("DLSW-MIB", "dlswTConnOperFlowCntlMode"), ("DLSW-MIB", "dlswTConnOperPartnerVersion"), ("DLSW-MIB", "dlswTConnOperPartnerVendorID"), ("DLSW-MIB", "dlswTConnOperPartnerVersionStr"), ("DLSW-MIB", "dlswTConnOperPartnerInitPacingWndw"), ("DLSW-MIB", "dlswTConnOperPartnerSapList"), ("DLSW-MIB", "dlswTConnOperPartnerMacExcl"), ("DLSW-MIB", "dlswTConnOperPartnerMacInfo"), ("DLSW-MIB", "dlswTConnOperDiscTime"), ("DLSW-MIB", "dlswTConnOperDiscReason"), ("DLSW-MIB", "dlswTConnOperDiscActiveCir"), ("DLSW-MIB", "dlswTConnOperInDataPkts"), ("DLSW-MIB", "dlswTConnOperOutDataPkts"), ("DLSW-MIB", "dlswTConnOperInDataOctets"), ("DLSW-MIB", "dlswTConnOperOutDataOctets"), ("DLSW-MIB", "dlswTConnOperInCntlPkts"), ("DLSW-MIB", "dlswTConnOperOutCntlPkts"), ("DLSW-MIB", "dlswTConnOperCURexSents"), ("DLSW-MIB", "dlswTConnOperICRexRcvds"), ("DLSW-MIB", "dlswTConnOperCURexRcvds"), ("DLSW-MIB", "dlswTConnOperICRexSents"), ("DLSW-MIB", "dlswTConnOperCirCreates"), ("DLSW-MIB", "dlswTConnOperCircuits"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswTConnOperGroup = dlswTConnOperGroup.setStatus('current')
if mibBuilder.loadTexts: dlswTConnOperGroup.setDescription('Conformance group for operation information for transport connections.')
dlswTConnNBGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 6)).setObjects(("DLSW-MIB", "dlswTConnOperPartnerNBExcl"), ("DLSW-MIB", "dlswTConnOperPartnerNBInfo"), ("DLSW-MIB", "dlswTConnOperNQexSents"), ("DLSW-MIB", "dlswTConnOperNRexRcvds"), ("DLSW-MIB", "dlswTConnOperNQexRcvds"), ("DLSW-MIB", "dlswTConnOperNRexSents"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswTConnNBGroup = dlswTConnNBGroup.setStatus('current')
if mibBuilder.loadTexts: dlswTConnNBGroup.setDescription('Conformance group for operation information for transport connections, specifically for nodes that support NetBIOS.')
dlswTConnTcpConfigGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 7)).setObjects(("DLSW-MIB", "dlswTConnTcpConfigKeepAliveInt"), ("DLSW-MIB", "dlswTConnTcpConfigTcpConnections"), ("DLSW-MIB", "dlswTConnTcpConfigMaxSegmentSize"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswTConnTcpConfigGroup = dlswTConnTcpConfigGroup.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpConfigGroup.setDescription('Conformance group for configuration information for transport connections using TCP.')
dlswTConnTcpOperGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 8)).setObjects(("DLSW-MIB", "dlswTConnTcpOperKeepAliveInt"), ("DLSW-MIB", "dlswTConnTcpOperPrefTcpConnections"), ("DLSW-MIB", "dlswTConnTcpOperTcpConnections"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswTConnTcpOperGroup = dlswTConnTcpOperGroup.setStatus('current')
if mibBuilder.loadTexts: dlswTConnTcpOperGroup.setDescription('Conformance group for operation information for transport connections using TCP.')
dlswInterfaceGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 9)).setObjects(("DLSW-MIB", "dlswIfRowStatus"), ("DLSW-MIB", "dlswIfVirtualSegment"), ("DLSW-MIB", "dlswIfSapList"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswInterfaceGroup = dlswInterfaceGroup.setStatus('current')
if mibBuilder.loadTexts: dlswInterfaceGroup.setDescription('Conformance group for DLSw interfaces.')
dlswDirGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 10)).setObjects(("DLSW-MIB", "dlswDirMacEntries"), ("DLSW-MIB", "dlswDirMacCacheHits"), ("DLSW-MIB", "dlswDirMacCacheMisses"), ("DLSW-MIB", "dlswDirMacCacheNextIndex"), ("DLSW-MIB", "dlswDirMacMac"), ("DLSW-MIB", "dlswDirMacMask"), ("DLSW-MIB", "dlswDirMacEntryType"), ("DLSW-MIB", "dlswDirMacLocationType"), ("DLSW-MIB", "dlswDirMacLocation"), ("DLSW-MIB", "dlswDirMacStatus"), ("DLSW-MIB", "dlswDirMacLFSize"), ("DLSW-MIB", "dlswDirMacRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswDirGroup = dlswDirGroup.setStatus('current')
if mibBuilder.loadTexts: dlswDirGroup.setDescription('Conformance group for DLSw directory using MAC addresses.')
dlswDirNBGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 11)).setObjects(("DLSW-MIB", "dlswDirNBEntries"), ("DLSW-MIB", "dlswDirNBCacheHits"), ("DLSW-MIB", "dlswDirNBCacheMisses"), ("DLSW-MIB", "dlswDirNBCacheNextIndex"), ("DLSW-MIB", "dlswDirNBName"), ("DLSW-MIB", "dlswDirNBNameType"), ("DLSW-MIB", "dlswDirNBEntryType"), ("DLSW-MIB", "dlswDirNBLocationType"), ("DLSW-MIB", "dlswDirNBLocation"), ("DLSW-MIB", "dlswDirNBStatus"), ("DLSW-MIB", "dlswDirNBLFSize"), ("DLSW-MIB", "dlswDirNBRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswDirNBGroup = dlswDirNBGroup.setStatus('current')
if mibBuilder.loadTexts: dlswDirNBGroup.setDescription('Conformance group for DLSw directory using NetBIOS names.')
dlswDirLocateGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 12)).setObjects(("DLSW-MIB", "dlswDirLocateMacLocation"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswDirLocateGroup = dlswDirLocateGroup.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateGroup.setDescription('Conformance group for a node that can return directory entry order for a given MAC address.')
dlswDirLocateNBGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 13)).setObjects(("DLSW-MIB", "dlswDirLocateNBLocation"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswDirLocateNBGroup = dlswDirLocateNBGroup.setStatus('current')
if mibBuilder.loadTexts: dlswDirLocateNBGroup.setDescription('Conformance group for a node that can return directory entry order for a given NetBIOS name.')
dlswCircuitStatGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 14)).setObjects(("DLSW-MIB", "dlswCircuitStatActives"), ("DLSW-MIB", "dlswCircuitStatCreates"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswCircuitStatGroup = dlswCircuitStatGroup.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitStatGroup.setDescription('Conformance group for statistics about circuits.')
dlswCircuitGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 15)).setObjects(("DLSW-MIB", "dlswCircuitS1IfIndex"), ("DLSW-MIB", "dlswCircuitS1DlcType"), ("DLSW-MIB", "dlswCircuitS1RouteInfo"), ("DLSW-MIB", "dlswCircuitS1CircuitId"), ("DLSW-MIB", "dlswCircuitS1Dlc"), ("DLSW-MIB", "dlswCircuitS2Location"), ("DLSW-MIB", "dlswCircuitS2TDomain"), ("DLSW-MIB", "dlswCircuitS2TAddress"), ("DLSW-MIB", "dlswCircuitS2CircuitId"), ("DLSW-MIB", "dlswCircuitOrigin"), ("DLSW-MIB", "dlswCircuitEntryTime"), ("DLSW-MIB", "dlswCircuitStateTime"), ("DLSW-MIB", "dlswCircuitState"), ("DLSW-MIB", "dlswCircuitPriority"), ("DLSW-MIB", "dlswCircuitFCSendGrantedUnits"), ("DLSW-MIB", "dlswCircuitFCSendCurrentWndw"), ("DLSW-MIB", "dlswCircuitFCRecvGrantedUnits"), ("DLSW-MIB", "dlswCircuitFCRecvCurrentWndw"), ("DLSW-MIB", "dlswCircuitFCLargestRecvGranted"), ("DLSW-MIB", "dlswCircuitFCLargestSendGranted"), ("DLSW-MIB", "dlswCircuitFCHalveWndwSents"), ("DLSW-MIB", "dlswCircuitFCResetOpSents"), ("DLSW-MIB", "dlswCircuitFCHalveWndwRcvds"), ("DLSW-MIB", "dlswCircuitFCResetOpRcvds"), ("DLSW-MIB", "dlswCircuitDiscReasonLocal"), ("DLSW-MIB", "dlswCircuitDiscReasonRemote"), ("DLSW-MIB", "dlswCircuitDiscReasonRemoteData"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswCircuitGroup = dlswCircuitGroup.setStatus('current')
if mibBuilder.loadTexts: dlswCircuitGroup.setDescription('Conformance group for DLSw circuits.')
dlswSdlcGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 16)).setObjects(("DLSW-MIB", "dlswSdlcLsEntries"), ("DLSW-MIB", "dlswSdlcLsLocalMac"), ("DLSW-MIB", "dlswSdlcLsLocalSap"), ("DLSW-MIB", "dlswSdlcLsLocalIdBlock"), ("DLSW-MIB", "dlswSdlcLsLocalIdNum"), ("DLSW-MIB", "dlswSdlcLsRemoteMac"), ("DLSW-MIB", "dlswSdlcLsRemoteSap"), ("DLSW-MIB", "dlswSdlcLsRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswSdlcGroup = dlswSdlcGroup.setStatus('current')
if mibBuilder.loadTexts: dlswSdlcGroup.setDescription('Conformance group for DLSw SDLC support.')
dlswNotificationGroup = NotificationGroup((1, 3, 6, 1, 2, 1, 46, 3, 2, 17)).setObjects(("DLSW-MIB", "dlswTrapTConnPartnerReject"), ("DLSW-MIB", "dlswTrapTConnProtViolation"), ("DLSW-MIB", "dlswTrapTConnUp"), ("DLSW-MIB", "dlswTrapTConnDown"), ("DLSW-MIB", "dlswTrapCircuitUp"), ("DLSW-MIB", "dlswTrapCircuitDown"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlswNotificationGroup = dlswNotificationGroup.setStatus('current')
if mibBuilder.loadTexts: dlswNotificationGroup.setDescription('Conformance group for DLSw notifications.')
mibBuilder.exportSymbols("DLSW-MIB", dlswCircuitStatGroup=dlswCircuitStatGroup, dlswNodeVirtualSegmentLFSize=dlswNodeVirtualSegmentLFSize, dlswIfRowStatus=dlswIfRowStatus, dlswTrapTConnProtViolation=dlswTrapTConnProtViolation, dlswDirNBLFSize=dlswDirNBLFSize, dlswCircuitS2TDomain=dlswCircuitS2TDomain, dlswTConnStatCloseBusys=dlswTConnStatCloseBusys, dlswDirMacIndex=dlswDirMacIndex, dlswDirLocateNBGroup=dlswDirLocateNBGroup, dlswDirLocateNBLocation=dlswDirLocateNBLocation, dlswCircuitS2Location=dlswCircuitS2Location, dlswDirCache=dlswDirCache, dlswSdlcLsLocalSap=dlswSdlcLsLocalSap, dlswSdlcLsLocalIdBlock=dlswSdlcLsLocalIdBlock, dlswNotificationGroup=dlswNotificationGroup, dlswInterface=dlswInterface, dlswTrapTConnPartnerReject=dlswTrapTConnPartnerReject, dlswCircuitFCSendCurrentWndw=dlswCircuitFCSendCurrentWndw, dlswTrapCntlTConnProtViolation=dlswTrapCntlTConnProtViolation, EndStationLocation=EndStationLocation, dlswTConnOperDiscTime=dlswTConnOperDiscTime, dlswTConnOperPartnerInitPacingWndw=dlswTConnOperPartnerInitPacingWndw, dlswTConnOperEntryTime=dlswTConnOperEntryTime, dlswTConnOperPartnerMacInfo=dlswTConnOperPartnerMacInfo, dlswTConnOperCURexSents=dlswTConnOperCURexSents, dlswDirStat=dlswDirStat, dlswDirMacCacheHits=dlswDirMacCacheHits, dlswDirLocate=dlswDirLocate, dlswCircuitOrigin=dlswCircuitOrigin, dlswDirMacCacheMisses=dlswDirMacCacheMisses, dlswTConnTcpOperKeepAliveInt=dlswTConnTcpOperKeepAliveInt, dlswCircuitFCLargestRecvGranted=dlswCircuitFCLargestRecvGranted, dlswCircuitS2CircuitId=dlswCircuitS2CircuitId, PYSNMP_MODULE_ID=dlsw, dlswTConnConfigIndex=dlswTConnConfigIndex, dlswDirNBGroup=dlswDirNBGroup, dlswNodeGroup=dlswNodeGroup, dlswTConnConfigInitCirRecvWndw=dlswTConnConfigInitCirRecvWndw, dlswMIB=dlswMIB, dlswDirMacLFSize=dlswDirMacLFSize, dlswTConnOperPartnerMacExcl=dlswTConnOperPartnerMacExcl, dlswDirCompliance=dlswDirCompliance, dlswTConnTcpConfigEntry=dlswTConnTcpConfigEntry, dlswDirNBLocationType=dlswDirNBLocationType, dlswNode=dlswNode, dlswTConnConfigEntry=dlswTConnConfigEntry, dlswSdlcLsLocalIdNum=dlswSdlcLsLocalIdNum, dlsw=dlsw, dlswDirNBLocation=dlswDirNBLocation, dlswTConnStatCloseIdles=dlswTConnStatCloseIdles, dlswTConnOperEntry=dlswTConnOperEntry, dlswDirLocateNBEntry=dlswDirLocateNBEntry, dlswTraps=dlswTraps, dlswCircuitStatCreates=dlswCircuitStatCreates, dlswDirNBCacheHits=dlswDirNBCacheHits, dlswDirNBNameType=dlswDirNBNameType, dlswTConnOperCirCreates=dlswTConnOperCirCreates, dlswTConnConfigTDomain=dlswTConnConfigTDomain, dlswTConnOperInCntlPkts=dlswTConnOperInCntlPkts, dlswIfEntry=dlswIfEntry, dlswDirNBCacheNextIndex=dlswDirNBCacheNextIndex, null=null, dlswTConnStatActiveConnections=dlswTConnStatActiveConnections, DlcType=DlcType, dlswTConnOperInDataOctets=dlswTConnOperInDataOctets, dlswIfSapList=dlswIfSapList, dlswDirMacEntryType=dlswDirMacEntryType, dlswTConnOperTDomain=dlswTConnOperTDomain, dlswCircuitStatActives=dlswCircuitStatActives, TAddress=TAddress, dlswTConnOperNQexSents=dlswTConnOperNQexSents, dlswDirNBRowStatus=dlswDirNBRowStatus, dlswDirNBEntryType=dlswDirNBEntryType, dlswCircuitS1RouteInfo=dlswCircuitS1RouteInfo, dlswTConnConfigGroup=dlswTConnConfigGroup, dlswTConnConfigRowStatus=dlswTConnConfigRowStatus, dlswCircuitState=dlswCircuitState, dlswCircuitEntry=dlswCircuitEntry, dlswCircuitGroup=dlswCircuitGroup, dlswTConnOperOutDataPkts=dlswTConnOperOutDataPkts, dlswTConnTcpConfigTcpConnections=dlswTConnTcpConfigTcpConnections, dlswIfTable=dlswIfTable, dlswDirGroup=dlswDirGroup, dlswDirNBEntries=dlswDirNBEntries, 
dlswNodeStdPacingSupport=dlswNodeStdPacingSupport, dlswCircuitPriority=dlswCircuitPriority, dlswNodeStatus=dlswNodeStatus, dlswCircuitS2TAddress=dlswCircuitS2TAddress, dlswDirLocateCompliance=dlswDirLocateCompliance, dlswTConn=dlswTConn, dlswCircuitS1CircuitId=dlswCircuitS1CircuitId, dlswSdlcGroup=dlswSdlcGroup, NBName=NBName, dlswIfVirtualSegment=dlswIfVirtualSegment, dlswTConnOperPartnerNBExcl=dlswTConnOperPartnerNBExcl, dlswTConnOperNRexSents=dlswTConnOperNRexSents, dlswTConnTcpOperTable=dlswTConnTcpOperTable, dlswSdlcLsTable=dlswSdlcLsTable, dlswDirLocateMacTable=dlswDirLocateMacTable, dlswTConnOperNQexRcvds=dlswTConnOperNQexRcvds, dlswCircuitFCSendGrantedUnits=dlswCircuitFCSendGrantedUnits, dlswTConnOperTable=dlswTConnOperTable, dlswTConnConfigSapList=dlswTConnConfigSapList, dlswDirMacRowStatus=dlswDirMacRowStatus, DlswTCPAddress=DlswTCPAddress, dlswDirMacEntries=dlswDirMacEntries, dlswTConnConfigEntryType=dlswTConnConfigEntryType, dlswTConnOperInDataPkts=dlswTConnOperInDataPkts, dlswCircuitS2Mac=dlswCircuitS2Mac, dlswDirMacLocationType=dlswDirMacLocationType, dlswTConnOperFlowCntlMode=dlswTConnOperFlowCntlMode, dlswCircuitFCHalveWndwRcvds=dlswCircuitFCHalveWndwRcvds, dlswDirLocateMacEntry=dlswDirLocateMacEntry, dlswSdlc=dlswSdlc, dlswDirNBTable=dlswDirNBTable, dlswCircuitFCRecvGrantedUnits=dlswCircuitFCRecvGrantedUnits, dlswTConnStat=dlswTConnStat, dlswDirLocateNBTable=dlswDirLocateNBTable, dlswDirLocateNBMatch=dlswDirLocateNBMatch, dlswDirLocateGroup=dlswDirLocateGroup, dlswNodeVendorID=dlswNodeVendorID, dlswCircuitStateTime=dlswCircuitStateTime, dlswDirMacEntry=dlswDirMacEntry, dlswDirLocateMacMatch=dlswDirLocateMacMatch, dlswNodeUpTime=dlswNodeUpTime, dlswTConnTcpConfigGroup=dlswTConnTcpConfigGroup, dlswCircuitTable=dlswCircuitTable, dlswCircuitFCHalveWndwSents=dlswCircuitFCHalveWndwSents, dlswTConnConfigOpens=dlswTConnConfigOpens, dlswTConnTcpOperPrefTcpConnections=dlswTConnTcpOperPrefTcpConnections, dlswSdlcCompliance=dlswSdlcCompliance, dlswTConnConfigLocalTAddr=dlswTConnConfigLocalTAddr, dlswTConnOperConnectTime=dlswTConnOperConnectTime, dlswCircuitS2Sap=dlswCircuitS2Sap, dlswTConnNBGroup=dlswTConnNBGroup, dlswNodeResourceMacExclusivity=dlswNodeResourceMacExclusivity, dlswTrapTConnDown=dlswTrapTConnDown, dlswCircuitS1IfIndex=dlswCircuitS1IfIndex, dlswCircuitFCLargestSendGranted=dlswCircuitFCLargestSendGranted, dlswTrapCircuitUp=dlswTrapCircuitUp, dlswTrapCircuitDown=dlswTrapCircuitDown, dlswTrapCntlTConn=dlswTrapCntlTConn, dlswTConnOperRemoteTAddr=dlswTConnOperRemoteTAddr, dlswInterfaceGroup=dlswInterfaceGroup, dlswTConnOperState=dlswTConnOperState, dlswTrapCntlTConnPartnerReject=dlswTrapCntlTConnPartnerReject, dlswGroups=dlswGroups, dlswDirLocateMacLocation=dlswDirLocateMacLocation, dlswTConnTcpOperEntry=dlswTConnTcpOperEntry, dlswTConnConfigLastModifyTime=dlswTConnConfigLastModifyTime, dlswTConnOperConfigIndex=dlswTConnOperConfigIndex, dlswCircuitFCResetOpSents=dlswCircuitFCResetOpSents, dlswDirMacMac=dlswDirMacMac, dlswTConnTcpOperGroup=dlswTConnTcpOperGroup, dlswTConnOperDiscActiveCir=dlswTConnOperDiscActiveCir, dlswTConnConfigGroupDefinition=dlswTConnConfigGroupDefinition, dlswDirMacCacheNextIndex=dlswDirMacCacheNextIndex, dlswSdlcLsRemoteSap=dlswSdlcLsRemoteSap, dlswTConnTcpConfigMaxSegmentSize=dlswTConnTcpConfigMaxSegmentSize, dlswTConnStatGroup=dlswTConnStatGroup, dlswDirectory=dlswDirectory, dlswDirMacMask=dlswDirMacMask, dlswDirMacTable=dlswDirMacTable, dlswTConnTcpConfigKeepAliveInt=dlswTConnTcpConfigKeepAliveInt, dlswTConnOperICRexSents=dlswTConnOperICRexSents, 
dlswTrapControl=dlswTrapControl, dlswTConnConfigTable=dlswTConnConfigTable, MacAddressNC=MacAddressNC, dlswTConnOperICRexRcvds=dlswTConnOperICRexRcvds, dlswCircuitS1Sap=dlswCircuitS1Sap, dlswTConnOperOutCntlPkts=dlswTConnOperOutCntlPkts, dlswTConnOperOutDataOctets=dlswTConnOperOutDataOctets, dlswTConnOperNRexRcvds=dlswTConnOperNRexRcvds, dlswCircuitS1Mac=dlswCircuitS1Mac, dlswTConnConfigRemoteTAddr=dlswTConnConfigRemoteTAddr, dlswTConnOperPartnerVendorID=dlswTConnOperPartnerVendorID, dlswTConnOperCURexRcvds=dlswTConnOperCURexRcvds, dlswDirNBStatus=dlswDirNBStatus, dlswCircuitS1Dlc=dlswCircuitS1Dlc, dlswTrapCntlCircuit=dlswTrapCntlCircuit, dlswCircuitEntryTime=dlswCircuitEntryTime, dlswTConnConfigAdvertiseMacNB=dlswTConnConfigAdvertiseMacNB, dlswNodeResourceNBExclusivity=dlswNodeResourceNBExclusivity, dlswNodeNBGroup=dlswNodeNBGroup, dlswDirNBEntry=dlswDirNBEntry, dlswSdlcLsRowStatus=dlswSdlcLsRowStatus, LFSize=LFSize, dlswDomains=dlswDomains, dlswCircuitDiscReasonLocal=dlswCircuitDiscReasonLocal, dlswSdlcLsRemoteMac=dlswSdlcLsRemoteMac, dlswTConnConfigSetupType=dlswTConnConfigSetupType, dlswNodeVersionString=dlswNodeVersionString, dlswTConnOperPartnerVersion=dlswTConnOperPartnerVersion, dlswCircuitDiscReasonRemote=dlswCircuitDiscReasonRemote, dlswTConnOperGroup=dlswTConnOperGroup, dlswSdlcLsLocalMac=dlswSdlcLsLocalMac, dlswCircuitStat=dlswCircuitStat, dlswCircuitFCResetOpRcvds=dlswCircuitFCResetOpRcvds, dlswTConnTcpOperTcpConnections=dlswTConnTcpOperTcpConnections, dlswTConnTcp=dlswTConnTcp, dlswSdlcLsEntry=dlswSdlcLsEntry, dlswDirLocateNBName=dlswDirLocateNBName, dlswTConnOperPartnerSapList=dlswTConnOperPartnerSapList, dlswCircuitFCRecvCurrentWndw=dlswCircuitFCRecvCurrentWndw, dlswSdlcLsEntries=dlswSdlcLsEntries, dlswTConnOperDiscReason=dlswTConnOperDiscReason, dlswTConnOperPartnerNBInfo=dlswTConnOperPartnerNBInfo, dlswDirMacLocation=dlswDirMacLocation, dlswDirNBIndex=dlswDirNBIndex, dlswConformance=dlswConformance, dlswTConnTcpCompliance=dlswTConnTcpCompliance, dlswCircuit=dlswCircuit, dlswTConnTcpConfigTable=dlswTConnTcpConfigTable, dlswTConnOperCircuits=dlswTConnOperCircuits, dlswDirMacStatus=dlswDirMacStatus, dlswTConnOperLocalTAddr=dlswTConnOperLocalTAddr, dlswTConnOperPartnerVersionStr=dlswTConnOperPartnerVersionStr, dlswCircuitDiscReasonRemoteData=dlswCircuitDiscReasonRemoteData, dlswCircuitS1DlcType=dlswCircuitS1DlcType, dlswTConnSpecific=dlswTConnSpecific, dlswTCPDomain=dlswTCPDomain, dlswDirNBCacheMisses=dlswDirNBCacheMisses, dlswDirLocateMacMac=dlswDirLocateMacMac, dlswDirNBName=dlswDirNBName, dlswTrapTConnUp=dlswTrapTConnUp, dlswCoreCompliance=dlswCoreCompliance, dlswNodeVersion=dlswNodeVersion, dlswCompliances=dlswCompliances)
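# --- Usage sketch (illustration only; the generated MIB ends with the export above) ---
# Assuming this module sits on pysnmp's MIB search path under the name 'DLSW-MIB',
# a management application could resolve the exported objects like this:
if __name__ == '__main__':
    from pysnmp.smi import builder
    _demoBuilder = builder.MibBuilder()
    _demoBuilder.loadModules('DLSW-MIB')
    _circuitState, = _demoBuilder.importSymbols('DLSW-MIB', 'dlswCircuitState')
    # Prints the OID defined above: (1, 3, 6, 1, 2, 1, 46, 1, 5, 2, 1, 17)
    print(_circuitState.getName())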
| 0
| 4,211
| 158
|
1967d13c3f768e829c80718e3dba109dbd9aa06d
| 4,522
|
py
|
Python
|
src/WikiDetector.py
|
bharat-suri/ComplexEmbeddings
|
f9fd76dd22ab5f4fcccc31e61b6a9a86e6de9ff0
|
[
"MIT"
] | 1
|
2018-05-09T09:28:15.000Z
|
2018-05-09T09:28:15.000Z
|
src/WikiDetector.py
|
tramplingWillow/ComplexEmbeddings
|
f9fd76dd22ab5f4fcccc31e61b6a9a86e6de9ff0
|
[
"MIT"
] | 4
|
2021-03-18T20:33:00.000Z
|
2022-01-13T00:46:20.000Z
|
src/WikiDetector.py
|
bharat-suri/ComplexEmbeddings
|
f9fd76dd22ab5f4fcccc31e61b6a9a86e6de9ff0
|
[
"MIT"
] | null | null | null |
import os
import re
import sys
import time
import tempfile
from joblib import Parallel, delayed
from urllib.parse import unquote
from collections import Counter
def upcase_first_letter(s):
"""
Capitalize the string.
"""
return s[0].upper() + s[1:]
def replaceAnchorText(filename):
"""
Given the input file, the surface forms loaded from anchor text
are used to extract entity mentions and replace them with the
article title in the text corpus itself.
Arguments
---------
filename : Input file containing the extracted text.
"""
print(filename)
# A temporary file to track the input file document by document.
t = tempfile.NamedTemporaryFile(mode = "r+")
dictionary = {}
with open(filename, 'r') as fil:
for line in fil:
if line.startswith("<doc"):
t.write(line)
# Get the title of the document from XML
title = line.split('title="')[1].split('">')[0]
TITLE = title.replace(' ', '_')
dictionary = {}
next(fil)
# Global surface forms dictionary
global surfForms
try:
dictionary[TITLE] = surfForms[TITLE]
except:
dictionary[TITLE] = set([title])
# Gender dictionary from checking persons
global gender
try:
if gender[title] == 'f':
dictionary[TITLE].add('she'); dictionary[TITLE].add('her'); dictionary[TITLE].add('hers')
else:
dictionary[TITLE].add('he'); dictionary[TITLE].add('him'); dictionary[TITLE].add('his')
except:
pass
continue
# Regular expressions to find and replace anchor text with resource entity
elif not line == '\n':
links = re.findall(r'\<a href\=\"([^\"\:]+)\"\>([^\<]+)\</a\>', line)
for link in links:
entity = link[0].replace('wikt%3A', ''); entity = entity.replace('wiktionary%3A', '')
if entity == '':
entity = link[1]
entity = unquote(entity[0].upper() + entity[1:]).replace(' ', '_')
anchor = link[1].split(' (')[0]
anchor = re.escape(anchor)
if entity not in dictionary:
dictionary[entity] = set()
dictionary[entity].add(anchor)
line = re.sub('<.*?>', '', line)
for entity in sorted(dictionary, key = len, reverse = True):
for surfaceForm in sorted(dictionary[entity], key = len, reverse = True):
try:
line = re.sub(r"\b(?<![\/\(])%s\b" % surfaceForm, 'resource/' + entity , line, flags = re.IGNORECASE)
except:
dictionary[entity].remove(surfaceForm)
if not line == '\n':
t.write(line)
t.seek(0)
with open(filename, 'w') as output:
for line in t:
output.write(line)
t.close()
return None
def loadSurfaceForms(filename, most_cmmn):
"""
Takes the surface form dictionary as input and
returns the loaded entities mapped onto their
most common surface forms.
Arguments
---------
filename : Input dictionary
most_cmmn : Parameter to decide the most common surface forms
"""
surfaceForm = {}
c = 0
with open(filename, 'r') as output:
for line in output:
c += 1
print('Loading surface forms: ' + str(int(c*1000/746565)/10) + '%', end = '\r')
surfaceForm[line.split(';', 1)[0]] = set(x[0] for x in Counter(line.rstrip('\n').split(';', 1)[1].split(';')).most_common(most_cmmn))
return surfaceForm
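# Illustration only: the dictionary file is assumed to hold one entry per line in the
# form "Title;surface form;surface form;..." (inferred from the parsing above), e.g.
#   line = 'Ada_Lovelace;Ada Lovelace;Ada;Countess of Lovelace\n'
#   line.split(';', 1)[0]                          -> 'Ada_Lovelace'  (the key)
#   line.rstrip('\n').split(';', 1)[1].split(';')  -> the surface forms that get counted
# The hard-coded 746565 in the progress message is used as the total line count for the
# progress percentage.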
def loadDictionary(filename):
"""
Loads the entire surface form dictionary from memory
"""
surfaceForm = {}
with open(filename, 'r') as output:
for line in output:
try:
surfaceForm[line.rsplit(';', 1)[0]] = line.rstrip('\n').rsplit(';', 1)[1]
except:
pass
return surfaceForm
def splitFiles(directory):
"""
Iterate through the files in the extracted directory
"""
names = []
for root, dirs, files in os.walk(directory):
for file in files:
names.append(root + '/' + file)
flag = False
for name in names:
with open(name, 'r') as inp:
dirname = name + '_'
os.mkdir(dirname)
for line in inp:
if line.startswith('</doc'):
continue
elif line.startswith('<doc'):
filename = upcase_first_letter(line.split('title="')[1].split('">')[0]).replace(' ', '_')
else:
with open(dirname + '/' + filename, '+a') as output:
if not line == '\n':
output.write(line)
os.remove(name)
return None
if __name__ == "__main__":
directory = sys.argv[1]
surfForms = loadSurfaceForms("data/AnchorDictionary.csv", 5)
gender = loadDictionary('data/gender.csv')
names = []
for root, dirs, files in os.walk(directory):
for file in files:
names.append(root + '/' + file)
Parallel(n_jobs = 8, verbose = 51)(delayed(replaceAnchorText)(name) for name in names)
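# Usage sketch (hypothetical directory name): run the script on a directory of files
# produced by a Wikipedia text extractor, with the two dictionaries above under data/:
#   python src/WikiDetector.py extracted_wiki/
# Every file found under the directory is rewritten in place, with anchor text and known
# surface forms replaced by 'resource/<Article_Title>' tokens, across 8 parallel workers.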
| 27.077844
| 136
| 0.639982
|
import os
import re
import sys
import time
import tempfile
from joblib import Parallel, delayed
from urllib.parse import unquote
from collections import Counter
def upcase_first_letter(s):
"""
Capitalize the string.
"""
return s[0].upper() + s[1:]
def replaceAnchorText(filename):
"""
Given the input file, the surface forms loaded from anchor text
are used to extract entity mentions and replace them with the
article title in the text corpus itself.
Arguments
---------
filename : Input file containing the extracted text.
"""
print(filename)
# A temporary file to track the input file document by document.
t = tempfile.NamedTemporaryFile(mode = "r+")
dictionary = {}
with open(filename, 'r') as fil:
for line in fil:
if line.startswith("<doc"):
t.write(line)
# Get the title of the document from XML
title = line.split('title="')[1].split('">')[0]
TITLE = title.replace(' ', '_')
dictionary = {}
next(fil)
# Global surface forms dictionary
global surfForms
try:
dictionary[TITLE] = surfForms[TITLE]
except:
dictionary[TITLE] = set([title])
# Gender dictionary from checking persons
global gender
try:
if gender[title] == 'f':
dictionary[TITLE].add('she'); dictionary[TITLE].add('her'); dictionary[TITLE].add('hers')
else:
dictionary[TITLE].add('he'); dictionary[TITLE].add('him'); dictionary[TITLE].add('his')
except:
pass
continue
# Regular expressions to find and replace anchor text with resource entity
elif not line == '\n':
links = re.findall(r'\<a href\=\"([^\"\:]+)\"\>([^\<]+)\</a\>', line)
for link in links:
entity = link[0].replace('wikt%3A', ''); entity = entity.replace('wiktionary%3A', '')
if entity == '':
entity = link[1]
entity = unquote(entity[0].upper() + entity[1:]).replace(' ', '_')
anchor = link[1].split(' (')[0]
anchor = re.escape(anchor)
if entity not in dictionary:
dictionary[entity] = set()
dictionary[entity].add(anchor)
line = re.sub('<.*?>', '', line)
for entity in sorted(dictionary, key = len, reverse = True):
for surfaceForm in sorted(dictionary[entity], key = len, reverse = True):
try:
line = re.sub(r"\b(?<![\/\(])%s\b" % surfaceForm, 'resource/' + entity , line, flags = re.IGNORECASE)
except:
dictionary[entity].remove(surfaceForm)
if not line == '\n':
t.write(line)
t.seek(0)
with open(filename, 'w') as output:
for line in t:
output.write(line)
t.close()
return None
def loadSurfaceForms(filename, most_cmmn):
"""
Takes the surface form dictionary as input and
returns the loaded entities mapped onto their
most common surface forms.
Arguments
---------
filename : Input dictionary
most_cmmn : Parameter to decide the most common surface forms
"""
surfaceForm = {}
c = 0
with open(filename, 'r') as output:
for line in output:
c += 1
print('Loading surface forms: ' + str(int(c*1000/746565)/10) + '%', end = '\r')
surfaceForm[line.split(';', 1)[0]] = set(x[0] for x in Counter(line.rstrip('\n').split(';', 1)[1].split(';')).most_common(most_cmmn))
return surfaceForm
def loadDictionary(filename):
"""
Loads the entire surface form dictionary from memory
"""
surfaceForm = {}
with open(filename, 'r') as output:
for line in output:
try:
surfaceForm[line.rsplit(';', 1)[0]] = line.rstrip('\n').rsplit(';', 1)[1]
except:
pass
return surfaceForm
def splitFiles(directory):
"""
Iterate through the files in the extracted directory
"""
names = []
for root, dirs, files in os.walk(directory):
for file in files:
names.append(root + '/' + file)
flag = False
for name in names:
with open(name, 'r') as inp:
dirname = name + '_'
os.mkdir(dirname)
for line in inp:
if line.startswith('</doc'):
continue
elif line.startswith('<doc'):
filename = upcase_first_letter(line.split('title="')[1].split('">')[0]).replace(' ', '_')
else:
with open(dirname + '/' + filename, '+a') as output:
if not line == '\n':
output.write(line)
os.remove(name)
return None
if __name__ == "__main__":
directory = sys.argv[1]
surfForms = loadSurfaceForms("data/AnchorDictionary.csv", 5)
gender = loadDictionary('data/gender.csv')
names = []
for root, dirs, files in os.walk(directory):
for file in files:
names.append(root + '/' + file)
Parallel(n_jobs = 8, verbose = 51)(delayed(replaceAnchorText)(name) for name in names)
| 0
| 0
| 0
|
3aab30f84514f366abf8ccb4ddb25329d9e6ab4f
| 778
|
py
|
Python
|
python_learning/basic_learning/lesson01.py
|
suncht/sun-python
|
b02cba75c142adc44438192577cae171cb4837d4
|
[
"Apache-2.0"
] | null | null | null |
python_learning/basic_learning/lesson01.py
|
suncht/sun-python
|
b02cba75c142adc44438192577cae171cb4837d4
|
[
"Apache-2.0"
] | null | null | null |
python_learning/basic_learning/lesson01.py
|
suncht/sun-python
|
b02cba75c142adc44438192577cae171cb4837d4
|
[
"Apache-2.0"
] | null | null | null |
words = ['car', 'window', 'ddd']
print('------basic iteration------------')
for w in words[1:-1]:
print(w, len(w))
print('------iteration with index 1------------')
for index, w in enumerate(words):
print(index, w)
print('------iteration with index 2------------')
for index, w in enumerate(words, start=1):
print(index, w)
maps = {'10': 'sun', '11': 'li', '12': 'cheng'}
print('------dict iteration 01------------')
for k, v in maps.items():
print(k, v)
print('------dict iteration with index------------')
for index, (k, v) in enumerate(maps.items()):
print(index, k, v)
import itertools
k = ['a', 'b', 'c']
v = ['11', '22', '33', '44']
print('------zip iteration 01------------')
for _k, _v in zip(k, v):
print(_k, _v)
print('------zip iteration 02------------')
for _k, _v in itertools.zip_longest(k, v):
print(_k, _v)
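# Extra sketch building on the zip examples above: zipped pairs can be collected into a
# dict, and itertools.zip_longest accepts a fillvalue for the shorter sequence.
print('------zip into dict------------')
print(dict(zip(k, v)))
print('------zip_longest with fillvalue------------')
for _k, _v in itertools.zip_longest(k, v, fillvalue='--'):
    print(_k, _v)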
| 19.948718
| 47
| 0.484576
|
words = ['car', 'window', 'ddd']
print('------basic iteration------------')
for w in words[1:-1]:
print(w, len(w))
print('------iteration with index 1------------')
for index, w in enumerate(words):
print(index, w)
print('------iteration with index 2------------')
for index, w in enumerate(words, start=1):
print(index, w)
maps = {'10': 'sun', '11': 'li', '12': 'cheng'}
print('------dict iteration 01------------')
for k, v in maps.items():
print(k, v)
print('------dict iteration with index------------')
for index, (k, v) in enumerate(maps.items()):
print(index, k, v)
import itertools
k = ['a', 'b', 'c']
v = ['11', '22', '33', '44']
print('------zip iteration 01------------')
for _k, _v in zip(k, v):
print(_k, _v)
print('------zip iteration 02------------')
for _k, _v in itertools.zip_longest(k, v):
print(_k, _v)
| 0
| 0
| 0
|
8283124bb8ae178fd38985032f44e4cce556c9a5
| 3,720
|
py
|
Python
|
data_prep.py
|
AlexTheM8/keras-frcnn
|
2e70679865385dcd861b4be8cac1a6c8ad0e3455
|
[
"Apache-2.0"
] | null | null | null |
data_prep.py
|
AlexTheM8/keras-frcnn
|
2e70679865385dcd861b4be8cac1a6c8ad0e3455
|
[
"Apache-2.0"
] | 1
|
2022-01-17T21:42:03.000Z
|
2022-01-17T21:42:03.000Z
|
data_prep.py
|
AlexTheM8/keras-frcnn
|
2e70679865385dcd861b4be8cac1a6c8ad0e3455
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import os
import cv2
# initialize the list of reference points
refPt = []
r = None
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path", required=True, help="Path to the images")
ap.add_argument("-a", "--annotate", required=False, help="Path to save annotations", default="train.txt")
ap.add_argument("-l", "--label", required=True, help="Label for region")
args = vars(ap.parse_args())
data = []
for f in os.listdir(args["path"]):
if not f.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
continue
fullpath = f'{args["path"]}/{f}'
# load the image, clone it, and setup the mouse callback function
image = cv2.imread(fullpath)
clone = image.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", select_area)
confirm = False
while not confirm:
try:
# keep looping until the 'q' key is pressed or confirmed
# display the image and wait for a keypress
resize = ResizeWithAspectRatio(image, height=790)
cv2.imshow("image", resize)
key = cv2.waitKey() & 0xFF
# if the 'c' key is pressed, confirm & continue
if key == ord("c"):
if len(refPt) > 0:
x1 = max(refPt[0][0] if refPt[0][0] < refPt[1][0] else refPt[1][0], 0)
x2 = min(refPt[0][0] if refPt[0][0] > refPt[1][0] else refPt[1][0], resize.shape[:2][1])
y1 = max(refPt[0][1] if refPt[0][1] < refPt[1][1] else refPt[1][1], 0)
y2 = min(refPt[0][1] if refPt[0][1] > refPt[1][1] else refPt[1][1], resize.shape[:2][0])
if r is None:
data.append(','.join([fullpath, str(x1), str(y1), str(x2), str(y2), args["label"]]))
else:
data.append(
','.join([fullpath, str(int(x1 / r)), str(int(y1 / r)), str(int(x2 / r)), str(int(y2 / r)),
args["label"]]))
confirm = True
# if the 'r' key is pressed, reset selection
elif key == ord("r"):
refPt = []
image = clone.copy()
# Quit
elif key == ord('q'):
# close all open windows
cv2.destroyAllWindows()
save_and_close()
exit()
except Exception as e:
print(e)
image = clone.copy()
save_and_close()
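# Output sketch (hypothetical values): each confirmed selection appends one
# comma-separated line to the annotation file (train.txt by default), e.g.
#   images/frame_001.png,34,120,210,305,player
# i.e. "path,x1,y1,x2,y2,label", with coordinates scaled back to the original image size
# whenever the preview shown on screen was resized.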
| 35.769231
| 119
| 0.541398
|
import argparse
import os
import cv2
# initialize the list of reference points
refPt = []
r = None
def ResizeWithAspectRatio(image, width=None, height=None, inter=cv2.INTER_AREA):
dim = None
(h, w) = image.shape[:2]
global r
if width is None and height is None:
return image
if width is None:
r = height / float(h)
dim = (int(w * r), height)
else:
r = width / float(w)
dim = (width, int(h * r))
return cv2.resize(image, dim, interpolation=inter)
def select_area(event, x, y, flags, param):
# grab references to the global variables
global refPt
# if the left mouse button was clicked, record the starting
# (x, y) coordinates
if event == cv2.EVENT_LBUTTONDOWN:
refPt = [(x, y)]
# check to see if the left mouse button was released
elif event == cv2.EVENT_LBUTTONUP:
# record the ending (x, y) coordinates
refPt.append((x, y))
# draw a rectangle around the region of interest
cv2.rectangle(resize, refPt[0], refPt[1], (0, 255, 0), 2)
cv2.imshow("image", resize)
def save_and_close():
with open(args["annotate"], 'a') as file:
for d in data:
file.write('\n'+d)
file.close()
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path", required=True, help="Path to the images")
ap.add_argument("-a", "--annotate", required=False, help="Path to save annotations", default="train.txt")
ap.add_argument("-l", "--label", required=True, help="Label for region")
args = vars(ap.parse_args())
data = []
for f in os.listdir(args["path"]):
if not f.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
continue
fullpath = f'{args["path"]}/{f}'
# load the image, clone it, and setup the mouse callback function
image = cv2.imread(fullpath)
clone = image.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", select_area)
confirm = False
while not confirm:
try:
# keep looping until the 'q' key is pressed or confirmed
# display the image and wait for a keypress
resize = ResizeWithAspectRatio(image, height=790)
cv2.imshow("image", resize)
key = cv2.waitKey() & 0xFF
# if the 'c' key is pressed, confirm & continue
if key == ord("c"):
if len(refPt) > 0:
x1 = max(refPt[0][0] if refPt[0][0] < refPt[1][0] else refPt[1][0], 0)
x2 = min(refPt[0][0] if refPt[0][0] > refPt[1][0] else refPt[1][0], resize.shape[:2][1])
y1 = max(refPt[0][1] if refPt[0][1] < refPt[1][1] else refPt[1][1], 0)
y2 = min(refPt[0][1] if refPt[0][1] > refPt[1][1] else refPt[1][1], resize.shape[:2][0])
if r is None:
data.append(','.join([fullpath, str(x1), str(y1), str(x2), str(y2), args["label"]]))
else:
data.append(
','.join([fullpath, str(int(x1 / r)), str(int(y1 / r)), str(int(x2 / r)), str(int(y2 / r)),
args["label"]]))
confirm = True
# if the 'r' key is pressed, reset selection
elif key == ord("r"):
refPt = []
image = clone.copy()
# Quit
elif key == ord('q'):
# close all open windows
cv2.destroyAllWindows()
save_and_close()
exit()
except Exception as e:
print(e)
image = clone.copy()
save_and_close()
| 1,080
| 0
| 69
|
b798273ce1f1b9a4a531453f3e36c04301dc8cc3
| 1,454
|
py
|
Python
|
util/manual_checks.py
|
RealA10N/wikiquote
|
c8daddd38d6d59744309f60a2bf911262e46988d
|
[
"MIT"
] | 55
|
2015-02-06T11:06:01.000Z
|
2018-04-19T19:09:30.000Z
|
util/manual_checks.py
|
RealA10N/wikiquote
|
c8daddd38d6d59744309f60a2bf911262e46988d
|
[
"MIT"
] | 17
|
2015-02-14T19:07:57.000Z
|
2018-04-27T03:06:30.000Z
|
util/manual_checks.py
|
RealA10N/wikiquote
|
c8daddd38d6d59744309f60a2bf911262e46988d
|
[
"MIT"
] | 24
|
2015-02-08T04:15:38.000Z
|
2018-09-09T14:48:29.000Z
|
import wikiquote
# manual_checks.py
# A short script to manually test wikiquote's functionality
MAX_QUOTE_LEN = 70
articles = [
'Barack Obama',
'Albert Einstein',
'Ada Lovelace',
'Leonard Cohen'
]
for lang in wikiquote.supported_languages():
print('\n----------------------------------------------------')
print('\nLanguage: {}'.format(lang))
print('\n----------------------------------------------------\n')
print('QOTD:')
try:
qotd, author = wikiquote.quote_of_the_day(lang=lang)
print(qotd)
print(' by: {}'.format(author))
except Exception as e:
print(e)
for article in articles:
print('\nArticle: {}'.format(article))
try:
results = wikiquote.search(article, lang=lang)
if results:
print('Results:')
for result in results:
print(' - {}'.format(result))
print()
quotes = wikiquote.quotes(results[0], lang=lang, max_quotes=10)
if quotes:
for quote in quotes:
if len(quote) > MAX_QUOTE_LEN:
quote = quote[:MAX_QUOTE_LEN] + '...'
print(' - {}'.format(quote))
else:
print('NO QUOTES!')
else:
print('NO RESULTS!')
except Exception as e:
print(e)
| 28.509804
| 79
| 0.462173
|
import wikiquote
# manual_checks.py
# A short script to manually test wikiquote's functionality
MAX_QUOTE_LEN = 70
articles = [
'Barack Obama',
'Albert Einstein',
'Ada Lovelace',
'Leonard Cohen'
]
for lang in wikiquote.supported_languages():
print('\n----------------------------------------------------')
print('\nLanguage: {}'.format(lang))
print('\n----------------------------------------------------\n')
print('QOTD:')
try:
qotd, author = wikiquote.quote_of_the_day(lang=lang)
print(qotd)
print(' by: {}'.format(author))
except Exception as e:
print(e)
for article in articles:
print('\nArticle: {}'.format(article))
try:
results = wikiquote.search(article, lang=lang)
if results:
print('Results:')
for result in results:
print(' - {}'.format(result))
print()
quotes = wikiquote.quotes(results[0], lang=lang, max_quotes=10)
if quotes:
for quote in quotes:
if len(quote) > MAX_QUOTE_LEN:
quote = quote[:MAX_QUOTE_LEN] + '...'
print(' - {}'.format(quote))
else:
print('NO QUOTES!')
else:
print('NO RESULTS!')
except Exception as e:
print(e)
| 0
| 0
| 0
|
8da94531cabea94207c2a7225f8dc11dfe2a2af3
| 1,133
|
py
|
Python
|
src/Modules/Web/web_configMenu.py
|
bopopescu/PyHouse_1
|
6444ed0b4c38ab59b9e419e4d54d65d598e6a54e
|
[
"MIT"
] | 1
|
2016-09-21T19:30:21.000Z
|
2016-09-21T19:30:21.000Z
|
src/Modules/Web/web_configMenu.py
|
bopopescu/PyHouse_1
|
6444ed0b4c38ab59b9e419e4d54d65d598e6a54e
|
[
"MIT"
] | null | null | null |
src/Modules/Web/web_configMenu.py
|
bopopescu/PyHouse_1
|
6444ed0b4c38ab59b9e419e4d54d65d598e6a54e
|
[
"MIT"
] | 1
|
2020-07-23T11:13:36.000Z
|
2020-07-23T11:13:36.000Z
|
"""
-*- test-case-name: PyHouse.src.Modules.Web.test.test_web_configMenu -*-
@name: PyHouse/src/Modules/Web/web_configMenu.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2015 by D. Brian Kimmel
@license: MIT License
@note: Created on Aug 23, 2015
@Summary:
"""
# Import system type stuff
import os
from nevow import loaders
from nevow import athena
# Import PyMh files and modules.
from Modules.Computer import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.webCfgMenu ')
# Handy helper for finding external resources nearby.
webpath = os.path.join(os.path.split(__file__)[0])
templatepath = os.path.join(webpath, 'template')
class ConfigMenuElement(athena.LiveElement):
"""
"""
docFactory = loaders.xmlfile(os.path.join(templatepath, 'configMenuElement.html'))
jsClass = u'configMenu.ConfigMenuWidget'
# ## END DBK
| 27.634146
| 87
| 0.701677
|
"""
-*- test-case-name: PyHouse.src.Modules.Web.test.test_web_configMenu -*-
@name: PyHouse/src/Modules/Web/web_configMenu.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2015 by D. Brian Kimmel
@license: MIT License
@note: Created on Aug 23, 2015
@Summary:
"""
# Import system type stuff
import os
from nevow import loaders
from nevow import athena
# Import PyMh files and modules.
from Modules.Computer import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.webCfgMenu ')
# Handy helper for finding external resources nearby.
webpath = os.path.join(os.path.split(__file__)[0])
templatepath = os.path.join(webpath, 'template')
class ConfigMenuElement(athena.LiveElement):
"""
"""
docFactory = loaders.xmlfile(os.path.join(templatepath, 'configMenuElement.html'))
jsClass = u'configMenu.ConfigMenuWidget'
def __init__(self, p_workspace_obj, p_params):
self.m_workspace_obj = p_workspace_obj
self.m_pyhouse_obj = p_workspace_obj.m_pyhouse_obj
self.m_params = p_params
# ## END DBK
| 167
| 0
| 29
|
800995b599b0b3eb588f67b3f508e080f4b132b3
| 647
|
py
|
Python
|
Minor Projects/record_sound.py
|
AMARTYA2020/nppy
|
7f750534bb5faa4e661447ca132077de0ce0a0ed
|
[
"MIT"
] | 4
|
2020-12-07T10:15:08.000Z
|
2021-11-17T11:21:07.000Z
|
Minor Projects/record_sound.py
|
AMARTYA2020/nppy
|
7f750534bb5faa4e661447ca132077de0ce0a0ed
|
[
"MIT"
] | null | null | null |
Minor Projects/record_sound.py
|
AMARTYA2020/nppy
|
7f750534bb5faa4e661447ca132077de0ce0a0ed
|
[
"MIT"
] | 1
|
2021-02-17T07:53:13.000Z
|
2021-02-17T07:53:13.000Z
|
import soundfile
import sounddevice
if __name__ == '__main__':
record = Record_Sound()
record.record_sound()
| 30.809524
| 129
| 0.616692
|
import soundfile
import sounddevice
class Record_Sound:
    def __init__(self, name='Record.wav', rate=40000, duration=60):
self.name = name # .wav (extension)
self.rate = rate # Hertz
self.duration = duration # Seconds
def record_sound(self):
        '''Record sound from the default input device (microphone)'''
data = sounddevice.rec(int(self.rate * self.duration), samplerate=self.rate, channels=1, blocking=True) # Recording ...
soundfile.write(self.name, data, self.rate) # Saving the file
if __name__ == '__main__':
record = Record_Sound()
record.record_sound()
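# Illustrative sketch (not part of the original file): the constructor
# parameters shown above can be overridden, e.g. a 5-second clip at 44.1 kHz.
if __name__ == '__main__':
    short_clip = Record_Sound(name='clip.wav', rate=44100, duration=5)
    short_clip.record_sound()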
| 188
| 307
| 24
|
32d27a9635f1f287ff943814b3bdebbce0ef1a4a
| 101
|
py
|
Python
|
deep_gw_pe_followup/restricted_prior/__init__.py
|
avivajpeyi/gw_pe_judge
|
151d597fdd6128a278e1d4cff65d3e6776e1fa83
|
[
"MIT"
] | null | null | null |
deep_gw_pe_followup/restricted_prior/__init__.py
|
avivajpeyi/gw_pe_judge
|
151d597fdd6128a278e1d4cff65d3e6776e1fa83
|
[
"MIT"
] | null | null | null |
deep_gw_pe_followup/restricted_prior/__init__.py
|
avivajpeyi/gw_pe_judge
|
151d597fdd6128a278e1d4cff65d3e6776e1fa83
|
[
"MIT"
] | null | null | null |
from .placeholder_prior import PlaceholderDelta, PlaceholderPrior
from .prior import RestrictedPrior
| 33.666667
| 65
| 0.881188
|
from .placeholder_prior import PlaceholderDelta, PlaceholderPrior
from .prior import RestrictedPrior
| 0
| 0
| 0
|
f8f7d1c2e739391af520fbdd2477c31b86272739
| 18,741
|
py
|
Python
|
boxsdk/object/search.py
|
bipsterbot/box-python-sdk
|
a55cbc6d8e2af6d51c30b795ad240bf8e45e922d
|
[
"Apache-2.0"
] | 367
|
2015-02-10T05:55:45.000Z
|
2022-03-16T23:39:58.000Z
|
boxsdk/object/search.py
|
bipsterbot/box-python-sdk
|
a55cbc6d8e2af6d51c30b795ad240bf8e45e922d
|
[
"Apache-2.0"
] | 686
|
2015-02-10T01:21:28.000Z
|
2022-03-31T11:40:22.000Z
|
boxsdk/object/search.py
|
bipsterbot/box-python-sdk
|
a55cbc6d8e2af6d51c30b795ad240bf8e45e922d
|
[
"Apache-2.0"
] | 260
|
2015-02-16T17:35:06.000Z
|
2022-03-20T17:45:28.000Z
|
# coding: utf-8
from __future__ import unicode_literals, absolute_import
import json
from .base_endpoint import BaseEndpoint
from ..pagination.limit_offset_based_object_collection import LimitOffsetBasedObjectCollection
from ..pagination.marker_based_object_collection import MarkerBasedObjectCollection
from ..util.api_call_decorator import api_call
from ..util.text_enum import TextEnum
class SearchScope(TextEnum):
"""Enum of possible serach scopes."""
USER = 'user_content'
ENTERPRISE = 'enterprise_content'
class TrashContent(TextEnum):
"""Enum of trash content values."""
NONE = 'non_trashed_only'
ONLY = 'trashed_only'
class MetadataSearchFilter(object):
"""
Helper class to encapsulate a single search filter. A search filter can only search against one template,
but can filter on many fields.
See :class:`MetadataSearchFilters`.
"""
def __init__(self, template_key, scope):
"""
:param template_key:
The key of the template to search on
:type template_key:
`unicode`
:param scope:
The scope of the template to search on
:type scope:
`unicode`
"""
self._template_key = template_key
self._scope = scope
self._field_filters = {}
def as_dict(self):
"""
Returns a `dict` representation of this object
:return:
The `dict` representation
:rtype:
`dict`
"""
return {
'templateKey': self._template_key,
'scope': self._scope,
'filters': self._field_filters
}
def add_value_based_filter(self, field_key, value):
"""
Add a value-based filter (used for token-based search on string fields, and exact match search on all other fields)
:param field_key:
The field key to filter on
        :type field_key:
`unicode`
:param value:
The value to use to filter
:type value:
`unicode`
"""
self._field_filters.update({field_key: value})
def add_range_filter(self, field_key, gt_value=None, lt_value=None):
"""
Add a range filter (used for ranged searches on numbers and dates)
:param field_key:
The field key to filter on
        :type field_key:
`unicode`
:param gt_value:
The lower bound of the range filter (inclusive)
:type gt_value:
`unicode` or `int` or `float` or `long` or None
:param lt_value:
The upper bound of the range filter (inclusive)
:type lt_value:
`unicode` or `int` or `float` or `long` or None
"""
range_part = {}
if gt_value:
range_part['gt'] = gt_value
if lt_value:
range_part['lt'] = lt_value
if not range_part:
raise ValueError('Should specify gt and/or lt')
self._field_filters.update({field_key: range_part})
class MetadataSearchFilters(object):
"""
Helper class to encapsulate a list of metadata search filter params (mdfilters API param)
See https://developers.box.com/metadata-api/#search for more details
"""
def as_list(self):
"""
Get a list of filters from this object to use as a parameter in the Search API
:return:
The list of filters
:rtype:
`list` of `dict`
"""
return [metadata_filter.as_dict() for metadata_filter in self._filters]
def add_filter(self, metadata_filter):
"""
Add a filter to this object. Note that the API only supports one filter.
:param metadata_filter:
The filter to add
:type metadata_filter:
:class:`MetadataSearchFilter`
"""
self._filters.append(metadata_filter)
class Search(BaseEndpoint):
"""Search Box for files and folders."""
def get_url(self, *args):
"""
Gets the search endpoint URL.
:return:
The search endpoint URL.
:rtype:
`unicode`
"""
# pylint:disable=arguments-differ
return super(Search, self).get_url('search')
@staticmethod
def start_metadata_filters():
"""
Get a :class:`MetadataSearchFilters` that represents a set of metadata filters.
:return:
The new :class:`MetadataSearchFilters`
:rtype:
:class:`MetadataSearchFilters`
"""
return MetadataSearchFilters()
@staticmethod
def make_single_metadata_filter(template_key, scope):
"""
Make a single :class:`MetadataSearchFilter` that represents a filter on a template. It must be
added to a :class:`MetadataSearchFilters`.
:return:
The new :class:`MetadataSearchFilter`
:rtype:
:class:`MetadataSearchFilter`
"""
return MetadataSearchFilter(template_key, scope)
@api_call
# pylint: disable=too-many-arguments,too-many-locals,too-many-branches
def query(
self,
query,
limit=None,
offset=0,
ancestor_folders=None,
file_extensions=None,
metadata_filters=None,
result_type=None,
content_types=None,
scope=None,
created_at_range=None,
updated_at_range=None,
size_range=None,
owner_users=None,
trash_content=None,
fields=None,
sort=None,
direction=None,
**kwargs
):
"""
Search Box for items matching the given query.
:param query:
The string to search for.
:type query:
`unicode`
:param limit:
The maximum number of items to return.
:type limit:
`int`
:param offset:
The search result at which to start the response.
:type offset:
`int`
:param ancestor_folders:
Folder ids to limit the search to.
:type ancestor_folders:
`Iterable` of :class:`Folder`
:param file_extensions:
File extensions to limit the search to.
:type file_extensions:
`iterable` of `unicode`
:param metadata_filters:
Filters used for metadata search
:type metadata_filters:
:class:`MetadataSearchFilters`
:param result_type:
Which type of result you want. Can be file or folder.
:type result_type:
`unicode`
:param content_types:
Which content types to search. Valid types include name, description, file_content, comments, and tags.
:type content_types:
`Iterable` of `unicode`
:param scope:
The scope of content to search over
:type scope:
`unicode` or None
:param created_at_range:
A tuple of the form (lower_bound, upper_bound) for the creation datetime of items to search.
:type created_at_range:
(`unicode` or None, `unicode` or None)
:param updated_at_range:
A tuple of the form (lower_bound, upper_bound) for the update datetime of items to search.
:type updated_at_range:
(`unicode` or None, `unicode` or None)
:param size_range:
A tuple of the form (lower_bound, upper_bound) for the size in bytes of items to search.
:type size_range:
(`int` or None, `int` or None)
:param owner_users:
Owner users to filter content by; only content belonging to these users will be returned.
:type owner_users:
`iterable` of :class:`User`
:param trash_content:
Whether to search trashed or non-trashed content.
:type trash_content:
`unicode` or None
:param fields:
Fields to include on the returned items.
:type fields:
`Iterable` of `unicode`
:param sort:
What to sort the search results by. Currently `modified_at`
:type sort:
`unicode` or None
:param direction:
The direction to display the sorted search results. Can be set to `DESC` for descending or `ASC` for ascending.
:type direction:
`unicode` or None
:return:
The collection of items that match the search query.
:rtype:
`Iterable` of :class:`Item`
"""
url = self.get_url()
additional_params = {'query': query}
if ancestor_folders is not None:
additional_params['ancestor_folder_ids'] = ','.join([folder.object_id for folder in ancestor_folders])
if file_extensions is not None:
additional_params['file_extensions'] = ','.join(file_extensions)
if metadata_filters is not None:
additional_params['mdfilters'] = json.dumps(metadata_filters.as_list())
if content_types is not None:
additional_params['content_types'] = ','.join(content_types)
if result_type is not None:
additional_params['type'] = result_type
if scope is not None:
additional_params['scope'] = scope
if created_at_range is not None:
additional_params['created_at_range'] = '{},{}'.format(created_at_range[0] or '', created_at_range[1] or '')
if updated_at_range is not None:
additional_params['updated_at_range'] = '{},{}'.format(updated_at_range[0] or '', updated_at_range[1] or '')
if size_range is not None:
additional_params['size_range'] = '{},{}'.format(size_range[0] or '', size_range[1] or '')
if owner_users is not None:
additional_params['owner_user_ids'] = ','.join([user.object_id for user in owner_users])
if trash_content is not None:
additional_params['trash_content'] = trash_content
if sort is not None:
additional_params['sort'] = sort
if direction is not None:
additional_params['direction'] = direction
additional_params.update(kwargs)
return LimitOffsetBasedObjectCollection(
self._session,
url,
limit=limit,
offset=offset,
fields=fields,
additional_params=additional_params,
return_full_pages=False,
)
@api_call
def metadata_query(self, from_template, ancestor_folder_id, query=None, query_params=None, use_index=None, order_by=None,
marker=None, limit=None, fields=None):
# pylint: disable=arguments-differ
"""Query Box items by their metadata.
:param from_template:
The template used in the query. Must be in the form scope.templateKey.
:type from_template:
`unicode`
:param ancestor_folder_id:
The folder_id to which to restrain the query
:type ancestor_folder_id:
`unicode`
:param query:
The logical expression of the query
:type query:
`unicode` or None
:param query_params:
Required if query present. The arguments for the query.
:type query_params:
`dict` or None
:param use_index:
The name of the index to use
:type use_index:
`unicode` or None
:param order_by:
The field_key(s) to order on and the corresponding direction(s)
:type order_by:
`list` of `dict`
:param marker:
The marker to use for requesting the next page
:type marker:
`unicode` or None
:param limit:
Max results to return for a single request (0-100 inclusive)
:type limit:
`int`
:param fields:
List of fields to request
:type fields:
`Iterable` of `unicode` or None
:returns:
An iterator of the item search results
:rtype:
:class:`BoxObjectCollection`
"""
url = super(Search, self).get_url('metadata_queries/execute_read')
data = {
'from': from_template,
'ancestor_folder_id': ancestor_folder_id
}
if query is not None:
data['query'] = query
if query_params is not None:
data['query_params'] = query_params
if use_index is not None:
data['use_index'] = use_index
if order_by is not None:
data['order_by'] = order_by
return MarkerBasedObjectCollection(
session=self._session,
url=url,
limit=limit,
marker=marker,
fields=fields,
additional_params=data,
return_full_pages=False,
use_post=True
)
@api_call
# pylint: disable=too-many-arguments,too-many-locals,too-many-branches
def query_with_shared_links(
self,
query,
limit=None,
offset=0,
ancestor_folders=None,
file_extensions=None,
metadata_filters=None,
result_type=None,
content_types=None,
scope=None,
created_at_range=None,
updated_at_range=None,
size_range=None,
owner_users=None,
trash_content=None,
fields=None,
sort=None,
direction=None,
**kwargs
):
"""
Search Box for items matching the given query. May also include items that are only accessible via recently used shared links.
:param query:
The string to search for.
:type query:
`unicode`
:param limit:
The maximum number of items to return.
:type limit:
`int`
:param offset:
The search result at which to start the response.
:type offset:
`int`
:param ancestor_folders:
Folder ids to limit the search to.
:type ancestor_folders:
`Iterable` of :class:`Folder`
:param file_extensions:
File extensions to limit the search to.
:type file_extensions:
`iterable` of `unicode`
:param metadata_filters:
Filters used for metadata search
:type metadata_filters:
:class:`MetadataSearchFilters`
:param result_type:
Which type of result you want. Can be file or folder.
:type result_type:
`unicode`
:param content_types:
Which content types to search. Valid types include name, description, file_content, comments, and tags.
:type content_types:
`Iterable` of `unicode`
:param scope:
The scope of content to search over
:type scope:
`unicode` or None
:param created_at_range:
A tuple of the form (lower_bound, upper_bound) for the creation datetime of items to search.
:type created_at_range:
(`unicode` or None, `unicode` or None)
:param updated_at_range:
A tuple of the form (lower_bound, upper_bound) for the update datetime of items to search.
:type updated_at_range:
(`unicode` or None, `unicode` or None)
:param size_range:
A tuple of the form (lower_bound, upper_bound) for the size in bytes of items to search.
:type size_range:
(`int` or None, `int` or None)
:param owner_users:
Owner users to filter content by; only content belonging to these users will be returned.
:type owner_users:
`iterable` of :class:`User`
:param trash_content:
Whether to search trashed or non-trashed content.
:type trash_content:
`unicode` or None
:param fields:
Fields to include on the returned items.
:type fields:
`Iterable` of `unicode`
:param sort:
What to sort the search results by. Currently `modified_at`
:type sort:
`unicode` or None
:param direction:
The direction to display the sorted search results. Can be set to `DESC` for descending or `ASC` for ascending.
:type direction:
`unicode` or None
:return:
The collection of items that match the search query.
:rtype:
`Iterable` of :class:`Item`
"""
url = self.get_url()
additional_params = {'query': query, 'include_recent_shared_links': True}
if ancestor_folders is not None:
additional_params['ancestor_folder_ids'] = ','.join([folder.object_id for folder in ancestor_folders])
if file_extensions is not None:
additional_params['file_extensions'] = ','.join(file_extensions)
if metadata_filters is not None:
additional_params['mdfilters'] = json.dumps(metadata_filters.as_list())
if content_types is not None:
additional_params['content_types'] = ','.join(content_types)
if result_type is not None:
additional_params['type'] = result_type
if scope is not None:
additional_params['scope'] = scope
if created_at_range is not None:
additional_params['created_at_range'] = '{},{}'.format(created_at_range[0] or '', created_at_range[1] or '')
if updated_at_range is not None:
additional_params['updated_at_range'] = '{},{}'.format(updated_at_range[0] or '', updated_at_range[1] or '')
if size_range is not None:
additional_params['size_range'] = '{},{}'.format(size_range[0] or '', size_range[1] or '')
if owner_users is not None:
additional_params['owner_user_ids'] = ','.join([user.object_id for user in owner_users])
if trash_content is not None:
additional_params['trash_content'] = trash_content
if sort is not None:
additional_params['sort'] = sort
if direction is not None:
additional_params['direction'] = direction
additional_params.update(kwargs)
return LimitOffsetBasedObjectCollection(
self._session,
url,
limit=limit,
offset=offset,
fields=fields,
additional_params=additional_params,
return_full_pages=False,
)
| 35.629278
| 134
| 0.588496
|
# coding: utf-8
from __future__ import unicode_literals, absolute_import
import json
from .base_endpoint import BaseEndpoint
from ..pagination.limit_offset_based_object_collection import LimitOffsetBasedObjectCollection
from ..pagination.marker_based_object_collection import MarkerBasedObjectCollection
from ..util.api_call_decorator import api_call
from ..util.text_enum import TextEnum
class SearchScope(TextEnum):
"""Enum of possible serach scopes."""
USER = 'user_content'
ENTERPRISE = 'enterprise_content'
class TrashContent(TextEnum):
"""Enum of trash content values."""
NONE = 'non_trashed_only'
ONLY = 'trashed_only'
class MetadataSearchFilter(object):
"""
Helper class to encapsulate a single search filter. A search filter can only search against one template,
but can filter on many fields.
See :class:`MetadataSearchFilters`.
"""
def __init__(self, template_key, scope):
"""
:param template_key:
The key of the template to search on
:type template_key:
`unicode`
:param scope:
The scope of the template to search on
:type scope:
`unicode`
"""
self._template_key = template_key
self._scope = scope
self._field_filters = {}
def as_dict(self):
"""
Returns a `dict` representation of this object
:return:
The `dict` representation
:rtype:
`dict`
"""
return {
'templateKey': self._template_key,
'scope': self._scope,
'filters': self._field_filters
}
def add_value_based_filter(self, field_key, value):
"""
Add a value-based filter (used for token-based search on string fields, and exact match search on all other fields)
:param field_key:
The field key to filter on
        :type field_key:
`unicode`
:param value:
The value to use to filter
:type value:
`unicode`
"""
self._field_filters.update({field_key: value})
def add_range_filter(self, field_key, gt_value=None, lt_value=None):
"""
Add a range filter (used for ranged searches on numbers and dates)
:param field_key:
The field key to filter on
        :type field_key:
`unicode`
:param gt_value:
The lower bound of the range filter (inclusive)
:type gt_value:
`unicode` or `int` or `float` or `long` or None
:param lt_value:
The upper bound of the range filter (inclusive)
:type lt_value:
`unicode` or `int` or `float` or `long` or None
"""
range_part = {}
if gt_value:
range_part['gt'] = gt_value
if lt_value:
range_part['lt'] = lt_value
if not range_part:
raise ValueError('Should specify gt and/or lt')
self._field_filters.update({field_key: range_part})
class MetadataSearchFilters(object):
"""
Helper class to encapsulate a list of metadata search filter params (mdfilters API param)
See https://developers.box.com/metadata-api/#search for more details
"""
def __init__(self):
self._filters = []
def as_list(self):
"""
Get a list of filters from this object to use as a parameter in the Search API
:return:
The list of filters
:rtype:
`list` of `dict`
"""
return [metadata_filter.as_dict() for metadata_filter in self._filters]
def add_filter(self, metadata_filter):
"""
Add a filter to this object. Note that the API only supports one filter.
:param metadata_filter:
The filter to add
:type metadata_filter:
:class:`MetadataSearchFilter`
"""
self._filters.append(metadata_filter)
class Search(BaseEndpoint):
"""Search Box for files and folders."""
def get_url(self, *args):
"""
Gets the search endpoint URL.
:return:
The search endpoint URL.
:rtype:
`unicode`
"""
# pylint:disable=arguments-differ
return super(Search, self).get_url('search')
@staticmethod
def start_metadata_filters():
"""
Get a :class:`MetadataSearchFilters` that represents a set of metadata filters.
:return:
The new :class:`MetadataSearchFilters`
:rtype:
:class:`MetadataSearchFilters`
"""
return MetadataSearchFilters()
@staticmethod
def make_single_metadata_filter(template_key, scope):
"""
Make a single :class:`MetadataSearchFilter` that represents a filter on a template. It must be
added to a :class:`MetadataSearchFilters`.
:return:
The new :class:`MetadataSearchFilter`
:rtype:
:class:`MetadataSearchFilter`
"""
return MetadataSearchFilter(template_key, scope)
@api_call
# pylint: disable=too-many-arguments,too-many-locals,too-many-branches
def query(
self,
query,
limit=None,
offset=0,
ancestor_folders=None,
file_extensions=None,
metadata_filters=None,
result_type=None,
content_types=None,
scope=None,
created_at_range=None,
updated_at_range=None,
size_range=None,
owner_users=None,
trash_content=None,
fields=None,
sort=None,
direction=None,
**kwargs
):
"""
Search Box for items matching the given query.
:param query:
The string to search for.
:type query:
`unicode`
:param limit:
The maximum number of items to return.
:type limit:
`int`
:param offset:
The search result at which to start the response.
:type offset:
`int`
:param ancestor_folders:
Folder ids to limit the search to.
:type ancestor_folders:
`Iterable` of :class:`Folder`
:param file_extensions:
File extensions to limit the search to.
:type file_extensions:
`iterable` of `unicode`
:param metadata_filters:
Filters used for metadata search
:type metadata_filters:
:class:`MetadataSearchFilters`
:param result_type:
Which type of result you want. Can be file or folder.
:type result_type:
`unicode`
:param content_types:
Which content types to search. Valid types include name, description, file_content, comments, and tags.
:type content_types:
`Iterable` of `unicode`
:param scope:
The scope of content to search over
:type scope:
`unicode` or None
:param created_at_range:
A tuple of the form (lower_bound, upper_bound) for the creation datetime of items to search.
:type created_at_range:
(`unicode` or None, `unicode` or None)
:param updated_at_range:
A tuple of the form (lower_bound, upper_bound) for the update datetime of items to search.
:type updated_at_range:
(`unicode` or None, `unicode` or None)
:param size_range:
A tuple of the form (lower_bound, upper_bound) for the size in bytes of items to search.
:type size_range:
(`int` or None, `int` or None)
:param owner_users:
Owner users to filter content by; only content belonging to these users will be returned.
:type owner_users:
`iterable` of :class:`User`
:param trash_content:
Whether to search trashed or non-trashed content.
:type trash_content:
`unicode` or None
:param fields:
Fields to include on the returned items.
:type fields:
`Iterable` of `unicode`
:param sort:
What to sort the search results by. Currently `modified_at`
:type sort:
`unicode` or None
:param direction:
The direction to display the sorted search results. Can be set to `DESC` for descending or `ASC` for ascending.
:type direction:
`unicode` or None
:return:
The collection of items that match the search query.
:rtype:
`Iterable` of :class:`Item`
"""
url = self.get_url()
additional_params = {'query': query}
if ancestor_folders is not None:
additional_params['ancestor_folder_ids'] = ','.join([folder.object_id for folder in ancestor_folders])
if file_extensions is not None:
additional_params['file_extensions'] = ','.join(file_extensions)
if metadata_filters is not None:
additional_params['mdfilters'] = json.dumps(metadata_filters.as_list())
if content_types is not None:
additional_params['content_types'] = ','.join(content_types)
if result_type is not None:
additional_params['type'] = result_type
if scope is not None:
additional_params['scope'] = scope
if created_at_range is not None:
additional_params['created_at_range'] = '{},{}'.format(created_at_range[0] or '', created_at_range[1] or '')
if updated_at_range is not None:
additional_params['updated_at_range'] = '{},{}'.format(updated_at_range[0] or '', updated_at_range[1] or '')
if size_range is not None:
additional_params['size_range'] = '{},{}'.format(size_range[0] or '', size_range[1] or '')
if owner_users is not None:
additional_params['owner_user_ids'] = ','.join([user.object_id for user in owner_users])
if trash_content is not None:
additional_params['trash_content'] = trash_content
if sort is not None:
additional_params['sort'] = sort
if direction is not None:
additional_params['direction'] = direction
additional_params.update(kwargs)
return LimitOffsetBasedObjectCollection(
self._session,
url,
limit=limit,
offset=offset,
fields=fields,
additional_params=additional_params,
return_full_pages=False,
)
@api_call
def metadata_query(self, from_template, ancestor_folder_id, query=None, query_params=None, use_index=None, order_by=None,
marker=None, limit=None, fields=None):
# pylint: disable=arguments-differ
"""Query Box items by their metadata.
:param from_template:
The template used in the query. Must be in the form scope.templateKey.
:type from_template:
`unicode`
:param ancestor_folder_id:
The folder_id to which to restrain the query
:type ancestor_folder_id:
`unicode`
:param query:
The logical expression of the query
:type query:
`unicode` or None
:param query_params:
Required if query present. The arguments for the query.
:type query_params:
`dict` or None
:param use_index:
The name of the index to use
:type use_index:
`unicode` or None
:param order_by:
The field_key(s) to order on and the corresponding direction(s)
:type order_by:
`list` of `dict`
:param marker:
The marker to use for requesting the next page
:type marker:
`unicode` or None
:param limit:
Max results to return for a single request (0-100 inclusive)
:type limit:
`int`
:param fields:
List of fields to request
:type fields:
`Iterable` of `unicode` or None
:returns:
An iterator of the item search results
:rtype:
:class:`BoxObjectCollection`
"""
url = super(Search, self).get_url('metadata_queries/execute_read')
data = {
'from': from_template,
'ancestor_folder_id': ancestor_folder_id
}
if query is not None:
data['query'] = query
if query_params is not None:
data['query_params'] = query_params
if use_index is not None:
data['use_index'] = use_index
if order_by is not None:
data['order_by'] = order_by
return MarkerBasedObjectCollection(
session=self._session,
url=url,
limit=limit,
marker=marker,
fields=fields,
additional_params=data,
return_full_pages=False,
use_post=True
)
@api_call
# pylint: disable=too-many-arguments,too-many-locals,too-many-branches
def query_with_shared_links(
self,
query,
limit=None,
offset=0,
ancestor_folders=None,
file_extensions=None,
metadata_filters=None,
result_type=None,
content_types=None,
scope=None,
created_at_range=None,
updated_at_range=None,
size_range=None,
owner_users=None,
trash_content=None,
fields=None,
sort=None,
direction=None,
**kwargs
):
"""
Search Box for items matching the given query. May also include items that are only accessible via recently used shared links.
:param query:
The string to search for.
:type query:
`unicode`
:param limit:
The maximum number of items to return.
:type limit:
`int`
:param offset:
The search result at which to start the response.
:type offset:
`int`
:param ancestor_folders:
Folder ids to limit the search to.
:type ancestor_folders:
`Iterable` of :class:`Folder`
:param file_extensions:
File extensions to limit the search to.
:type file_extensions:
`iterable` of `unicode`
:param metadata_filters:
Filters used for metadata search
:type metadata_filters:
:class:`MetadataSearchFilters`
:param result_type:
Which type of result you want. Can be file or folder.
:type result_type:
`unicode`
:param content_types:
Which content types to search. Valid types include name, description, file_content, comments, and tags.
:type content_types:
`Iterable` of `unicode`
:param scope:
The scope of content to search over
:type scope:
`unicode` or None
:param created_at_range:
A tuple of the form (lower_bound, upper_bound) for the creation datetime of items to search.
:type created_at_range:
(`unicode` or None, `unicode` or None)
:param updated_at_range:
A tuple of the form (lower_bound, upper_bound) for the update datetime of items to search.
:type updated_at_range:
(`unicode` or None, `unicode` or None)
:param size_range:
A tuple of the form (lower_bound, upper_bound) for the size in bytes of items to search.
:type size_range:
(`int` or None, `int` or None)
:param owner_users:
Owner users to filter content by; only content belonging to these users will be returned.
:type owner_users:
`iterable` of :class:`User`
:param trash_content:
Whether to search trashed or non-trashed content.
:type trash_content:
`unicode` or None
:param fields:
Fields to include on the returned items.
:type fields:
`Iterable` of `unicode`
:param sort:
What to sort the search results by. Currently `modified_at`
:type sort:
`unicode` or None
:param direction:
The direction to display the sorted search results. Can be set to `DESC` for descending or `ASC` for ascending.
:type direction:
`unicode` or None
:return:
The collection of items that match the search query.
:rtype:
`Iterable` of :class:`Item`
"""
url = self.get_url()
additional_params = {'query': query, 'include_recent_shared_links': True}
if ancestor_folders is not None:
additional_params['ancestor_folder_ids'] = ','.join([folder.object_id for folder in ancestor_folders])
if file_extensions is not None:
additional_params['file_extensions'] = ','.join(file_extensions)
if metadata_filters is not None:
additional_params['mdfilters'] = json.dumps(metadata_filters.as_list())
if content_types is not None:
additional_params['content_types'] = ','.join(content_types)
if result_type is not None:
additional_params['type'] = result_type
if scope is not None:
additional_params['scope'] = scope
if created_at_range is not None:
additional_params['created_at_range'] = '{},{}'.format(created_at_range[0] or '', created_at_range[1] or '')
if updated_at_range is not None:
additional_params['updated_at_range'] = '{},{}'.format(updated_at_range[0] or '', updated_at_range[1] or '')
if size_range is not None:
additional_params['size_range'] = '{},{}'.format(size_range[0] or '', size_range[1] or '')
if owner_users is not None:
additional_params['owner_user_ids'] = ','.join([user.object_id for user in owner_users])
if trash_content is not None:
additional_params['trash_content'] = trash_content
if sort is not None:
additional_params['sort'] = sort
if direction is not None:
additional_params['direction'] = direction
additional_params.update(kwargs)
return LimitOffsetBasedObjectCollection(
self._session,
url,
limit=limit,
offset=offset,
fields=fields,
additional_params=additional_params,
return_full_pages=False,
)
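# Usage sketch grounded in the methods defined above. The OAuth2 credentials,
# the 'marketingCollateral' template key and the 'documentType' field are
# placeholders (assumptions), not part of the original module.
if __name__ == '__main__':
    from boxsdk import Client, OAuth2

    oauth = OAuth2(client_id='CLIENT_ID', client_secret='CLIENT_SECRET',
                   access_token='DEVELOPER_TOKEN')  # placeholder credentials
    search = Client(oauth).search()

    # Build a single-template metadata filter and run a scoped query.
    filters = search.start_metadata_filters()
    md_filter = search.make_single_metadata_filter('marketingCollateral', 'enterprise')
    md_filter.add_value_based_filter('documentType', 'datasheet')
    filters.add_filter(md_filter)

    for item in search.query('quarterly results', limit=20, file_extensions=['pdf'],
                             metadata_filters=filters):
        print(item.name)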
| 25
| 0
| 26
|
542147158cd8469f8c735caf9e061a417df66d13
| 90
|
py
|
Python
|
graphs/word_ladder_problem.py
|
gcvalderrama/python_foundations
|
5ac045085dcc6c906729b481f833fa6a7889bd19
|
[
"MIT"
] | null | null | null |
graphs/word_ladder_problem.py
|
gcvalderrama/python_foundations
|
5ac045085dcc6c906729b481f833fa6a7889bd19
|
[
"MIT"
] | null | null | null |
graphs/word_ladder_problem.py
|
gcvalderrama/python_foundations
|
5ac045085dcc6c906729b481f833fa6a7889bd19
|
[
"MIT"
] | null | null | null |
import unittest
| 11.25
| 30
| 0.677778
|
import unittest
class Test(unittest.TestCase):
def test_case_a(self):
pass
| 14
| 9
| 50
|
937387e12ccefb14cba396ae5fd45c6a4b37f34d
| 1,006
|
py
|
Python
|
kq/job.py
|
Semo/kq
|
024cc52b10b2af0c2999a20920faa460442bcbd6
|
[
"MIT"
] | 582
|
2016-10-31T04:26:28.000Z
|
2022-03-30T12:57:14.000Z
|
kq/job.py
|
Semo/kq
|
024cc52b10b2af0c2999a20920faa460442bcbd6
|
[
"MIT"
] | 17
|
2016-11-01T16:37:16.000Z
|
2022-02-10T06:47:36.000Z
|
kq/job.py
|
Semo/kq
|
024cc52b10b2af0c2999a20920faa460442bcbd6
|
[
"MIT"
] | 26
|
2016-11-01T05:06:02.000Z
|
2022-02-04T12:44:36.000Z
|
from dataclasses import dataclass
from typing import Any, Callable, Dict, Optional, Sequence, Union
@dataclass(frozen=True)
| 27.944444
| 65
| 0.682903
|
from dataclasses import dataclass
from typing import Any, Callable, Dict, Optional, Sequence, Union
@dataclass(frozen=True)
class Job:
# KQ job UUID
id: Optional[str] = None
# Unix timestamp indicating when the job was queued.
timestamp: Optional[int] = None
# Name of the Kafka topic.
topic: Optional[str] = None
# Function to execute.
func: Optional[Callable[..., Any]] = None
# Positional arguments for the function.
args: Optional[Sequence[Any]] = None
# Keyword arguments for the function.
kwargs: Optional[Dict[str, Any]] = None
# Job timeout threshold in seconds.
timeout: Optional[Union[float, int]] = None
# Kafka message key. Jobs with the same keys are sent
# to the same topic partition and executed sequentially.
# Applies only when the "partition" field is not set.
key: Optional[bytes] = None
# Kafka topic partition. If set, the "key" field is ignored.
partition: Optional[Union[float, int]] = None
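# Illustrative sketch (not from the file above): constructing a Job directly
# with the fields declared by the dataclass; the topic name and job function
# are placeholders, and in normal use kq's queue builds these objects on enqueue.
if __name__ == '__main__':
    import time
    import uuid

    def add(a: int, b: int) -> int:
        return a + b

    job = Job(id=str(uuid.uuid4()), timestamp=int(time.time()), topic='my_topic',
              func=add, args=(1, 2), kwargs={}, timeout=60)
    print(job.topic, job.func(*job.args))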
| 0
| 858
| 22
|
c3f525a433e2a6d4ca2d9c1ef91bbd4e3b476655
| 17,629
|
py
|
Python
|
toast_planck/preproc_modules/taudeconvolve.py
|
planck-npipe/toast-npipe
|
ca3e92ea3a81a6146e246ec1d0c5bdcaea3b49f2
|
[
"BSD-2-Clause"
] | 1
|
2021-04-20T08:09:35.000Z
|
2021-04-20T08:09:35.000Z
|
toast_planck/preproc_modules/taudeconvolve.py
|
planck-npipe/toast-npipe
|
ca3e92ea3a81a6146e246ec1d0c5bdcaea3b49f2
|
[
"BSD-2-Clause"
] | null | null | null |
toast_planck/preproc_modules/taudeconvolve.py
|
planck-npipe/toast-npipe
|
ca3e92ea3a81a6146e246ec1d0c5bdcaea3b49f2
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
from scipy import optimize
from scipy.signal import fftconvolve
from toast.mpi import MPI
import numpy as np
import toast.timing as timing
from . import time_response_tools
| 42.997561
| 201
| 0.512054
|
# Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
from scipy import optimize
from scipy.signal import fftconvolve
from toast.mpi import MPI
import numpy as np
import toast.timing as timing
from . import time_response_tools
class TauDeconvolver():
def __init__(self, bolo_id, IMO, filterlen=2 ** 20, fsample=180.3737,
lfer='LFER8', overlap=10000, extra_global_offset=None,
filterfile=None, tabulated_tf=None, fnorm=0.016, comm=None,
normalize_filter=True):
"""
Instantiate the deconvolution object
bolo_id -- Bolometer ID (e.g. 00_100_1a)
IMO -- Either an IMO object or a path to IMO XML dump
filterlen -- Fourier transform length, actual length will be a
power of 2 AT LEAST as long as this
fsample -- fixed sampling frequency
lfer -- Transfer function to seek from the IMO and deconvolve
overlap -- number of samples read for boundary effects. These
are not written into the filtered TOI
notch -- vector of line frequencies to notch out
wnotch -- relative width of the notch
extra_global_offset -- add another phase shift by hand; in same
units as global_offset in IMO.
tabulated_tf(None) -- When set, overrides LFER and IMO and
filterfile. A 3-element tuple containing frequency, real,
imaginary
filterfile(None) -- When set, overrides LFER and IMO. A 3 column
ASCII file containing the transfer function to convolve with
fnorm -- the frequency at which the transfer function is
normalized to 1.0. default is the dipole frequency.
"""
self.bolo_id = bolo_id
self.IMO = IMO
self.filterlen = 2
while self.filterlen < filterlen or self.filterlen < 3 * overlap:
self.filterlen *= 2
self.overlap = overlap
self.comm = comm
if self.comm is None:
self.ntask = 1
self.rank = 0
else:
self.ntask = self.comm.size
self.rank = self.comm.rank
self.normalize_filter = normalize_filter
# DEBUG begin
if self.rank == 0:
print("Initializing TauDeconvolver. bolo_id = {}, IMO = {}, filterlen = {}, fsample = {}, lfer = {}, filterfile = {}".format(bolo_id, IMO, filterlen, fsample, lfer, filterfile), flush=True)
# DEBUG end
freq = np.fft.rfftfreq(self.filterlen, 1. / fsample)
self.freq = freq
if tabulated_tf is not None:
self.tf = np.interp(
self.freq, tabulated_tf[0],
tabulated_tf[1]) + 1j * np.interp(self.freq, tabulated_tf[0],
tabulated_tf[2])
if self.normalize_filter:
norm = np.abs(
np.interp(fnorm, tabulated_tf[0], tabulated_tf[1]) +
1j * np.interp(fnorm, tabulated_tf[0], tabulated_tf[2]))
self.tf = self.tf / norm
self.tfinv = 1. / self.tf
self.tfinv[np.abs(self.tf) < 1e-4] = 0
self.lowpass = time_response_tools.filter_function(freq)
self.filter = self.lowpass * self.tfinv
self.fsample = fsample
if extra_global_offset is not None:
if extra_global_offset != 0.0:
phase = -2. * np.pi * extra_global_offset * freq / fsample
shift_tf = np.cos(phase) + 1j * np.sin(phase)
self.filter /= shift_tf
self.tf *= shift_tf
else:
self.filterfile = filterfile
if self.filterfile is not None:
if self.rank == 0:
try:
filt = np.genfromtxt(filterfile).T
except Exception as e:
raise Exception('Failed to load filter function from '
'{}: {}'.format(filterfile, e))
else:
filt = None
if self.comm is not None:
filt = self.comm.bcast(filt)
self.filter = np.interp(self.freq, filt[0], filt[1]) + \
1j * np.interp(self.freq, filt[0], filt[2])
if self.normalize_filter:
norm = np.abs(np.interp(fnorm, filt[0], filt[1]) +
1j * np.interp(fnorm, filt[0], filt[2]))
self.filter = self.filter / norm
# Invert the filter to allow convolving
self.tf = self.filter.copy()
good = self.filter != 0
self.tf[good] = 1. / self.filter[good]
self.tf[np.abs(self.filter) < 1e-4] = 0
else:
self.global_offset = self.IMO.get(
'IMO:HFI:DET:Phot_Pixel Name="{}":NoiseAndSyst:TimeResp:'
'LFER8:global_offset'.format(bolo_id), np.float64)
if extra_global_offset is not None:
self.global_offset += extra_global_offset
self.pars = {}
npole = 0
if lfer == 'LFER8':
prefix = 'IMO:HFI:DET:Phot_Pixel Name="{}":NoiseAndSyst:' \
'TimeResp:LFER8:'.format(bolo_id)
self.pars['a1'] = self.IMO.get(prefix + 'par1', np.float64)
self.pars['a2'] = self.IMO.get(prefix + 'par2', np.float64)
self.pars['a3'] = self.IMO.get(prefix + 'par3', np.float64)
self.pars['a4'] = self.IMO.get(prefix + 'par9', np.float64)
self.pars['a5'] = self.IMO.get(prefix + 'par11', np.float64)
self.pars['a6'] = self.IMO.get(prefix + 'par13', np.float64)
self.pars['a7'] = self.IMO.get(prefix + 'par15', np.float64)
self.pars['a8'] = self.IMO.get(prefix + 'par17', np.float64)
self.pars['tau1'] = self.IMO.get(prefix + 'par4',
np.float64)
self.pars['tau2'] = self.IMO.get(prefix + 'par5',
np.float64)
self.pars['tau3'] = self.IMO.get(prefix + 'par6',
np.float64)
self.pars['tau4'] = self.IMO.get(prefix + 'par10',
np.float64)
self.pars['tau5'] = self.IMO.get(prefix + 'par12',
np.float64)
self.pars['tau6'] = self.IMO.get(prefix + 'par14',
np.float64)
self.pars['tau7'] = self.IMO.get(prefix + 'par16',
np.float64)
self.pars['tau8'] = self.IMO.get(prefix + 'par18',
np.float64)
self.pars['tau_stray'] = self.IMO.get(prefix + 'par7',
np.float64)
self.pars['Sphase'] = self.IMO.get(prefix + 'par8',
np.float64)
prefix = 'IMO:HFI:DET:Phot_Pixel Name="{}":NoiseAndSyst:' \
'TimeResp:SallenKeyHPF:'.format(bolo_id)
self.pars['tauhp1'] = self.IMO.get(prefix + 'tauhp1',
np.float64)
self.pars['tauhp2'] = self.IMO.get(prefix + 'tauhp2',
np.float64)
npole = 8
for i in range(8, 0, -1):
if self.pars['tau' + str(i)] != 0:
break
npole -= 1
if self.pars['tauhp1'] != self.pars['tauhp2']:
raise Exception(
'Don\'t know how to handle the case where tauhp1 '
'({}) is not equal to tauhp2 ({})'.format(
self.pars['tauhp1'], self.pars['tauhp2']))
elif lfer == 'LFER1':
npole = 1
self.pars['a1'] = 1.0
self.pars['tau1'] = 0.01
self.pars['tau_stray'] = 2.095108e-03
self.pars['Sphase'] = 0.0
else:
raise Exception(
'Don\'t know how to parse {} transfer function '
'parameters from IMO'.format(lfer))
norm_f = np.array([0.0, fnorm])
norm_tf = time_response_tools.LFERn(norm_f, npole, self.pars)
phase = -2. * np.pi * self.global_offset * norm_f / fsample
shift_tf = np.cos(phase) + 1j * np.sin(phase)
norm_tf = norm_tf * (np.cos(phase) + 1j * np.sin(phase))
norm = np.abs(norm_tf[1])
tstart = MPI.Wtime()
if self.ntask == 1:
self.tf = time_response_tools.LFERn(freq, npole, self.pars) \
/ norm
else:
nfreq = len(freq)
nfreq_task = np.int(np.ceil(nfreq / self.ntask))
# First frequency must be zero for normalization
my_freq = np.hstack(
[[0.0],
freq[nfreq_task * self.rank:
nfreq_task * (self.rank + 1)]])
# Discard the extra frequency bin here
my_tf = time_response_tools.LFERn(
my_freq, npole, self.pars)[1:] / norm
self.tf = np.hstack(self.comm.allgather(my_tf))
tstop = MPI.Wtime()
if self.rank == 0:
print('Computed the LFER transfer function in {:.2f} s.'
''.format(tstop - tstart), flush=True)
self.tfinv = 1. / self.tf
self.tfinv[np.abs(self.tf) < 1e-4] = 0
self.lowpass = time_response_tools.filter_function(freq)
self.filter = self.lowpass * self.tfinv
self.fsample = fsample
phase = -2. * np.pi * self.global_offset * freq / fsample
shift_tf = np.cos(phase) + 1j * np.sin(phase)
self.filter /= shift_tf
self.tf *= shift_tf
self.init_flag_kernels()
return
def _trim_flag_kernel(self, kernel, center, tol=.1):
"""
Extract the center of the kernel
"""
ind = np.abs(np.arange(kernel.size) - center, dtype=np.int)
kernel = np.abs(kernel) > np.abs(np.amax(kernel)) * tol
wkernel = np.amax(kernel * ind)
ind = slice(center - wkernel, center + wkernel + 1)
return kernel[ind]
def init_flag_kernels(self):
"""
When (de)convolving the signal, some number of unflagged samples
become compromised by the flagged samples. Here we determine the
time-domain kernels to convolve the flags with.
"""
x = np.zeros(self.filterlen)
center = self.filterlen // 2
x[center] = 1
tfkernel = np.fft.irfft(np.fft.rfft(x) * self.tf, self.filterlen)
filterkernel = np.fft.irfft(np.fft.rfft(x) * self.filter,
self.filterlen)
self.tfkernel = self._trim_flag_kernel(tfkernel, center)
self.filterkernel = self._trim_flag_kernel(filterkernel, center)
return
def convolve(self, signal, flag):
return self.deconvolve(signal, flag, convolve_instead=True)
def deconvolve(self, signal_in, flag_in, convolve_instead=False):
"""
Deconvolve the precomputed transfer function.
Extend the flags appropriately.
"""
ntot = signal_in.size
signal_out = np.zeros(ntot)
buf = np.zeros(self.filterlen)
istart = 0
while istart < ntot:
nleft = len(signal_in) - istart
nprocess = min(nleft, self.filterlen)
istop = istart + nprocess
buf[:nprocess] = signal_in[istart:istop]
buf[nprocess:] = 0
bufstart = 0
bufstop = nprocess
if istart != 0:
istart += self.overlap
bufstart += self.overlap
if istop != signal_in.size:
istop -= self.overlap
bufstop -= self.overlap
if convolve_instead:
signal_out[istart:istop] = np.fft.irfft(
np.fft.rfft(buf) * self.tf,
self.filterlen)[bufstart:bufstop]
else:
signal_out[istart:istop] = np.fft.irfft(
np.fft.rfft(buf) * self.filter,
self.filterlen)[bufstart:bufstop]
if istop == ntot:
break
istart = istop - self.overlap
signal_out = signal_out.astype(signal_in.dtype)
if flag_in is not None:
if convolve_instead:
flag_out = fftconvolve(
flag_in != 0, self.tfkernel, mode='same') > 1e-3
else:
flag_out = fftconvolve(
flag_in != 0, self.filterkernel, mode='same') > 1e-3
flag_out = flag_out.astype(flag_in.dtype)
else:
flag_out = None
return signal_out, flag_out
def fit_relative_offset(self, npole, new_pars, sim_fwhm_arcmin=4.5,
sim_length=1.0):
"""
Compute the relative phase shift between this time response
and a new one. The new time response function is parameterized
as a different LFERn function with npole poles.
The return value is the shift in number of samples; should be
compatible with the input to global_offset defined in the
constructor method.
npole : number of poles in the filter
new_pars : parameters specified in the same dictionary format as
in the constructor
sim_fwhm_arcmin : simulated Gaussian signal FWHM in arcmin
sim_length : simulation length in seconds
sim_time_step : simulation time step between samples
"""
# generate the trial transfer function
freq = np.fft.rfftfreq(self.filterlen, 1. / self.fsample)
trial_filter = time_response_tools.LFERn(freq, npole, new_pars)
# generate a Gaussian timeline to use as an input
sigma_seconds = sim_fwhm_arcmin / 60 / 6 / np.sqrt(8 * np.log(2))
sim_time_step = sim_length / self.filterlen
time = np.arange(0.0, sim_length, sim_time_step)
model_tod = np.exp(-(time - (sim_length / 2)) ** 2
/ 2 / sigma_seconds ** 2)
filtered_tod = np.fft.irfft(
np.fft.rfft(model_tod) * self.filter / trial_filter,
self.filterlen).real
# internal error functions
def func_gauss(p, xin):
return p[0] * np.exp(-(xin - p[1]) ** 2 / (2.*p[2] ** 2))
def chi2_nosigma(p, xin, d):
return ((func_gauss(p, xin) - d) ** 2).sum()
par_guess = [1., 0.5, 0.1]
par_fit = optimize.fmin(
chi2_nosigma, par_guess, args=(time, filtered_tod),
disp=False, maxiter=10000, maxfun=10000, xtol=0.01)
relative_shift = -(sim_length / 2.0 - par_fit[1]) * self.fsample
return relative_shift
def fit_absolute_offset(self, sim_fwhm_arcmin=4.5, sim_length=1.0):
"""
Compute and correct the phase shift between this filter
function and NO filtering. This function is useful only for a
simulated time stream.
sim_fwhm_arcmin : simulated Gaussian signal FWHM in arcmin
sim_length : simulation length in seconds
"""
# generate the trial transfer function
freq = np.fft.rfftfreq(self.filterlen, 1. / self.fsample)
# generate a Gaussian timeline to use as an input
sigma_seconds = sim_fwhm_arcmin / 60 / 6 / np.sqrt(8 * np.log(2))
sim_time_step = sim_length / self.filterlen
time = np.arange(0.0, sim_length, sim_time_step)
model_tod = np.exp(-(time - (sim_length / 2.0)) ** 2 / 2.0
/ sigma_seconds ** 2)
filtered_tod = np.fft.irfft(
np.fft.rfft(model_tod) * self.filter, self.filterlen).real
# internal error functions
def func_gauss(p, xin):
return p[0] * np.exp(-(xin - p[1]) ** 2 / (2.*p[2] ** 2))
def chi2_nosigma(p, xin, d):
return ((func_gauss(p, xin) - d) ** 2).sum()
par_guess = [1., 0.5, 0.1]
par_fit = optimize.fmin(
chi2_nosigma, par_guess, args=(time, filtered_tod), disp=False,
maxiter=10000, maxfun=10000, xtol=0.01)
relative_shift = -(sim_length / 2.0 - par_fit[1]) * self.fsample
phase = -2 * np.pi * relative_shift * freq / self.fsample
shift_tf = np.cos(phase) + 1j * np.sin(phase)
self.filter /= shift_tf
self.tf *= shift_tf
return relative_shift
| 352
| 16,884
| 23
|
dcaa38dffec7b18b21d57d14b209e0915d096ecf
| 10,860
|
py
|
Python
|
workshop_material/tools.py
|
nrupatunga/pyimageconf2018
|
2f4c83a78206106b50835730749028a03fbbc565
|
[
"BSL-1.0"
] | 106
|
2018-08-30T01:45:38.000Z
|
2021-06-03T11:05:15.000Z
|
workshop_material/tools.py
|
nrupatunga/pyimageconf2018
|
2f4c83a78206106b50835730749028a03fbbc565
|
[
"BSL-1.0"
] | 3
|
2019-04-12T02:03:25.000Z
|
2019-05-07T00:16:55.000Z
|
workshop_material/tools.py
|
nrupatunga/pyimageconf2018
|
2f4c83a78206106b50835730749028a03fbbc565
|
[
"BSL-1.0"
] | 36
|
2018-08-30T04:08:31.000Z
|
2021-05-18T07:02:10.000Z
|
import numpy as np
from math import pi,cos,sin,sqrt
from dlib import point, get_rect, center
import dlib
###########################################################################################
###########################################################################################
###########################################################################################
###########################################################################################
###########################################################################################
def real_eigenvalues(xx, xy, yy):
"Return the eigenvalues of the matrix [xx xy; xy yy]"
b = -(xx + yy)
c = xx*yy - xy*xy
disc = b*b - 4*c
if (disc >= 0):
disc = sqrt(disc)
else:
disc = 0
v0 = (-b + disc)/2
v1 = (-b - disc)/2
return (v0,v1)
###########################################################################################
from dlib import intersect, angle_between_lines, polygon_area, count_points_on_side_of_line
from dlib import count_points_between_lines, length
###########################################################################################
###########################################################################################
###########################################################################################
| 35.032258
| 117
| 0.53628
|
import numpy as np
from math import pi,cos,sin,sqrt
from dlib import point, get_rect, center
import dlib
###########################################################################################
class hough_transform:
def __init__(self, size):
self.size = size
def perform_generic_hough_transform(self, img, record_hit):
assert(img.shape[0] == self.size)
assert(img.shape[1] == self.size)
cent = center(get_rect(img))
even_size = self.size - (self.size%2)
sqrt_2 = sqrt(2)
for r in range(img.shape[0]):
for c in range(img.shape[1]):
val = img[r][c]
if (val != 0):
x = c - cent.x
y = r - cent.y
# Now draw the curve in Hough space for this image point
for t in range(self.size):
theta = t*pi/even_size
radius = (x*cos(theta) + y*sin(theta))/sqrt_2 + even_size/2 + 0.5
rr = int(radius)
record_hit(point(t,rr), point(c,r), val)
def __call__(self, img):
himg = np.zeros(img.shape, dtype='float32')
def record_hit(hough_point, img_point, value):
nonlocal himg
himg[hough_point.y][hough_point.x] += value
self.perform_generic_hough_transform(img, record_hit)
return himg
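# A minimal usage sketch (hypothetical 'edges' array; assumes a square float
# image of edge strengths whose side length matches the accumulator size):
#   ht = hough_transform(size=edges.shape[0])
#   accumulator = ht(edges)   # bright accumulator cells correspond to dominant lines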
###########################################################################################
def coherent_hough_transform(ht, edges, horz, vert):
hcoherent = np.zeros((ht.size, ht.size, 3), dtype='float32')
def record_hit(hough_point, img_point, value):
x = horz[img_point.y][img_point.x]
y = vert[img_point.y][img_point.x]
# accumulate hessian matrices
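        # Summing x*x, x*y and y*y accumulates the 2x2 structure tensor of the image
        # gradients voting for each Hough cell; the eigenvalue ratio test below keeps
        # only cells whose contributing gradients share a single dominant direction.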
hcoherent[hough_point.y][hough_point.x][0] += x*x
hcoherent[hough_point.y][hough_point.x][1] += x*y
hcoherent[hough_point.y][hough_point.x][2] += y*y
ht.perform_generic_hough_transform(edges, record_hit)
himg = np.zeros((ht.size, ht.size), dtype='float32')
for r in range(himg.shape[0]):
for c in range(himg.shape[1]):
ev = real_eigenvalues(hcoherent[r][c][0], hcoherent[r][c][1], hcoherent[r][c][2])
if (max(ev) != 0 and min(ev)/max(ev) < 0.30):
himg[r][c] = max(ev)
else:
himg[r][c] = 0
return himg
###########################################################################################
def label_blobs_with_similar_angles(img, horz, vert, angle_threshold):
labels = np.zeros(img.shape, dtype='uint32')
dotprod_angle_thresh = cos(angle_threshold*pi/180)
next_label = 1
area = get_rect(img)
for r in range(img.shape[0]):
for c in range(img.shape[1]):
# skip already labeled pixels or background pixels
if (labels[r][c] != 0 or img[r][c] == 0):
continue
labels[r][c] = next_label
# now label all the connected neighbors of this point
neighbors = [(c,r)]
while len(neighbors) > 0:
x,y = neighbors.pop()
window = [(x-1,y-1), (x,y-1), (x+1,y-1),
(x-1,y), (x+1,y),
(x-1,y+1), (x,y+1), (x+1,y+1)]
for xx,yy in window:
# If this neighbor is in the image, not background, and not already labeled
if (area.contains(xx,yy) and img[yy][xx]!=0 and labels[yy][xx]==0):
dotprod = horz[y][x]*horz[yy][xx] + vert[y][x]*vert[yy][xx]
# if the angle between these two vectors is less than angle_threshold degrees.
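                            # (This treats horz/vert as unit-length gradient directions; with
                            # raw gradients the dot product would also scale with magnitude.)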
if dotprod > dotprod_angle_thresh:
labels[yy][xx] = next_label
neighbors.append((xx,yy))
next_label += 1
return labels, next_label
###########################################################################################
def discard_wacky_edge_groups (edges, horz, vert):
labels, num_blobs = label_blobs_with_similar_angles(edges, horz, vert, 25)
blob_sizes = dlib.get_histogram(labels, num_blobs)
# blank out short edges
for r in range(edges.shape[0]):
for c in range(edges.shape[1]):
if blob_sizes[labels[r][c]] < 20:
edges[r][c] = 0
###########################################################################################
def real_eigenvalues(xx, xy, yy):
"Return the eigenvalues of the matrix [xx xy; xy yy]"
b = -(xx + yy)
c = xx*yy - xy*xy
disc = b*b - 4*c
if (disc >= 0):
disc = sqrt(disc)
else:
disc = 0
v0 = (-b + disc)/2
v1 = (-b - disc)/2
return (v0,v1)
###########################################################################################
from dlib import intersect, angle_between_lines, polygon_area, count_points_on_side_of_line
from dlib import count_points_between_lines, length
def find_hough_boxes_simple(ht, hits):
# convert hough coordinates into lines in original image
lines = [ht.get_line(h) for h in hits]
angle_thresh = 20 # in degrees
def are_parallel(a,b):
intersects_outside_image = not get_rect(ht).contains(intersect(a,b))
return angle_between_lines(a,b) < angle_thresh and intersects_outside_image
# find all the parallel lines
parallel = []
for i in range(len(lines)):
for j in range(i+1,len(lines)):
if are_parallel(lines[i], lines[j]):
parallel.append((lines[i], lines[j], i, j))
def line_separation(a,b):
center1 = (a.p1+a.p2)/2
center2 = (b.p1+b.p2)/2
return length(center1-center2)
# sort the parallel line pairs so that lines that are most separated come first:
parallel = sorted(parallel, key=lambda a : line_separation(a[0],a[1]), reverse=True)
print("number of parallel line pairs: ", len(parallel))
boxes = []
area = get_rect(ht)
    # Now find boxes: combinations of two parallel line pairs whose four corner
    # intersections are all contained within the original image.
for i in range(len(parallel)):
for j in range(i+1,len(parallel)):
l1,l3, idx1,idx3 = parallel[i]
l2,l4, idx2,idx4 = parallel[j]
c1 = intersect(l1,l2)
c2 = intersect(l2,l3)
c3 = intersect(l3,l4)
c4 = intersect(l4,l1)
# skip this pair if it's outside the image
if (not area.contains(c1) or
not area.contains(c2) or
not area.contains(c3) or
not area.contains(c4) ):
continue
polyarea = polygon_area([c1, c2, c3, c4])
boxes.append((c1,c2,c3,c4,polyarea,idx1,idx2,idx3,idx4))
boxes = sorted(boxes, key=lambda x : x[4], reverse=True)
return boxes
###########################################################################################
def find_hough_boxes_less_simple(ht, hits, line_pixels):
assert(len(hits) == len(line_pixels))
boxes = []
for box in find_hough_boxes_simple(ht, hits):
c1,c2,c3,c4,polyarea,idx1,idx2,idx3,idx4 = box
pix1 = line_pixels[idx1]
pix2 = line_pixels[idx2]
pix3 = line_pixels[idx3]
pix4 = line_pixels[idx4]
l1 = ht.get_line(hits[idx1])
l2 = ht.get_line(hits[idx2])
l3 = ht.get_line(hits[idx3])
l4 = ht.get_line(hits[idx4])
center = (c1 + c2 + c3 + c4)/4
# check if all the corners are connected to each other
dist = 20
num_required = 15
if (count_points_on_side_of_line(l1, center, pix2, 1, dist) >= num_required and
count_points_on_side_of_line(l2, center, pix1, 1, dist) >= num_required and
count_points_on_side_of_line(l3, center, pix4, 1, dist) >= num_required and
count_points_on_side_of_line(l4, center, pix3, 1, dist) >= num_required and
count_points_on_side_of_line(l2, center, pix3, 1, dist) >= num_required and
count_points_on_side_of_line(l3, center, pix2, 1, dist) >= num_required and
count_points_on_side_of_line(l4, center, pix1, 1, dist) >= num_required and
count_points_on_side_of_line(l1, center, pix4, 1, dist) >= num_required):
boxes.append((c1,c2,c3,c4,polyarea,idx1,idx2,idx3,idx4))
return boxes
###########################################################################################
def find_hough_boxes(ht, hits, line_pixels):
assert(len(hits) == len(line_pixels))
boxes = []
for box in find_hough_boxes_simple(ht, hits):
c1,c2,c3,c4,polyarea,idx1,idx2,idx3,idx4 = box
pix1 = line_pixels[idx1]
pix2 = line_pixels[idx2]
pix3 = line_pixels[idx3]
pix4 = line_pixels[idx4]
l1 = ht.get_line(hits[idx1])
l2 = ht.get_line(hits[idx2])
l3 = ht.get_line(hits[idx3])
l4 = ht.get_line(hits[idx4])
center = (c1 + c2 + c3 + c4)/4
def corners_connected(l1,l2, pix1,pix2):
if len(pix1) == 0 or len(pix2) == 0:
return False
dist = 20
corners_touch = False
pts_in_l2_next_to_corner = count_points_on_side_of_line(l1, center, pix2, 1, dist)
pts_in_l1_next_to_corner = count_points_on_side_of_line(l2, center, pix1, 1, dist)
l2_near_corner = pts_in_l2_next_to_corner >= 15
l1_near_corner = pts_in_l1_next_to_corner >= 15
corners_touch = l1_near_corner and l2_near_corner
corner = intersect(l1,l2)
point_outside_box = 2*(corner-center) + center
l2_near_corner = pts_in_l2_next_to_corner >= 5
l1_near_corner = pts_in_l1_next_to_corner >= 5
# The two lines are connected if they touch or if none of them
# extends outside the bounds of the rectangle and at least one of
# them goes up to the edge of the rectangle.
return corners_touch or (count_points_on_side_of_line(l1, point_outside_box, pix2,2)/len(pix2) < 0.03 and
count_points_on_side_of_line(l2, point_outside_box, pix1,2)/len(pix1) < 0.03 and
(l1_near_corner or l2_near_corner))
if (corners_connected(l1,l2,pix1,pix2) and
corners_connected(l2,l3,pix2,pix3) and
corners_connected(l3,l4,pix3,pix4) and
corners_connected(l4,l1,pix4,pix1)):
boxes.append((c1,c2,c3,c4,polyarea,idx1,idx2,idx3,idx4))
return boxes
###########################################################################################
| 9,204
| 1
| 241
|
89654d5c3e8a017c193c6680270df4ae459a11ff
| 4,099
|
py
|
Python
|
main.py
|
kishorepv/Neural-Calculator
|
196cc95aa0ddb43893aebd989340db4bed916653
|
[
"Apache-2.0"
] | 3
|
2019-06-25T08:16:33.000Z
|
2020-01-30T05:10:59.000Z
|
main.py
|
kishorepv/Neural-Calculator
|
196cc95aa0ddb43893aebd989340db4bed916653
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
kishorepv/Neural-Calculator
|
196cc95aa0ddb43893aebd989340db4bed916653
|
[
"Apache-2.0"
] | null | null | null |
import os
from flask import Flask, flash, redirect, render_template, request, session, abort, url_for, make_response, Response
import sys
import yaml
import os.path
import base64
from predict_digit import *
from matplotlib import image as mplimg
import cv2
import numpy as np
#from flask_json import FlaskJSON, JsonError, json_response, as_json
from flask.ext.responses import json_response
app = Flask(__name__)
#FlaskJSON(app)
number=list()
data=list()
exp=None
@app.route("/", methods=["GET","POST"])
@app.route("/operator", methods=["GET","POST"])
if __name__ == "__main__":
app.secret_key = os.urandom(12)
app.run(debug=False, host='0.0.0.0', port=31456)
| 26.967105
| 116
| 0.645035
|
import os
from flask import Flask, flash, redirect, render_template, request, session, abort, url_for, make_response, Response
import sys
import yaml
import os.path
import base64
from predict_digit import *
from matplotlib import image as mplimg
import cv2
import numpy as np
#from flask_json import FlaskJSON, JsonError, json_response, as_json
from flask.ext.responses import json_response
app = Flask(__name__)
#FlaskJSON(app)
number=list()
data=list()
def make_number():
global number
stng=''.join([str(i) for i in number])
num=int(stng)
number=list()
return num
def apply_padding(img, border, val):
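    # Pad a 2-D array with a constant border of width `border` and value `val`
    # (a hand-rolled stand-in for cv2.copyMakeBorder with BORDER_CONSTANT).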
h,w=img.shape
cols=np.ones((h,border))*val
tmp=np.concatenate([cols,img,cols],axis=1)
rows=np.ones((border, w+2*border))*val
res=np.concatenate([rows,tmp,rows])
return res
def argsort(lst):
return sorted(range(len(lst)), key=lst.__getitem__)
def extract_img(fname="digit_image.jpg"):
im = cv2.imread(fname)
gray=cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
gray=255-gray
cv2.imwrite("grayscale.png",gray)
image,contours,hierarchy= cv2.findContours(gray,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
idx=0
print("No of digits: ", len(contours))
gray2=cv2.imread("grayscale.png")
gray2=cv2.cvtColor(gray2, cv2.COLOR_BGR2GRAY)
c=[ (0,0,255), #red
(0,255,0), #green
(255,0,0),#blue
(255,255,255), #white
(128,128,128), #gray
(0,0,0)#black
]
total=len(contours)
pnt_idxs=argsort([(x,y) for cnt in contours for x,y,w,h in [cv2.boundingRect(cnt)]])
lst=list()
for index,ix in enumerate(pnt_idxs):
x,y,w,h = cv2.boundingRect(contours[ix])
lst.append((x,y,w,h))
idx += 1
#x,y,w,h = cv2.boundingRect(cnt)
roi=gray2[y:y+h,x:x+w]
#cv2.imwrite("tmp.jpg", roi)
#cv2.copyMakeBorder(roi, new_img, borderz, borderz, borderz, borderz, cv2.BORDER_CONSTANT, 255)
new_img=apply_padding(roi, 20, 0)
new_img=255-new_img
cv2.imwrite(str(idx)+".jpg", new_img)
#cv2.rectangle(im,(x,y),(x+20,y+20),c[index],2)
#cv2.imwrite("annotated.jpg", im)
#print("Lst :",lst)
return lst,total
exp=None
@app.route("/", methods=["GET","POST"])
def root():
return render_template("root.html")
def xtract_number():
indices,count=extract_img()
#print("Count: ",count)
ans=list()
for i in range(1,count+1):
ans.append(predict_drawn_img(str(i)+".jpg")[0])
number=int(''.join([str(i) for i in ans]))
#print("Ans: ", ans)
#print(indices)
return number
def is_number(dat):
try:
int(dat)
except:
return False
return True
@app.route("/operator", methods=["GET","POST"])
def operators():
global data
ans=0.0
op=request.json["operator"]
if op=="reset":
data=list()
return json_response({"num":"Draw the number above", "res":0.0}, status_code=200)
elif op=="backspace":
if len(data):
data=data[:-1]
exp=' '.join([str(i) for i in data])
return json_response({"num":exp, "res":ans}, status_code=200)
if data and is_number(data[-1]):
if op=='=':
exp=' '.join([str(i) for i in data+['=']])
ans=solve()
return json_response({"num":exp, "res":ans}, status_code=200)
elif op in ['+', '-', '*','/']:
data.append(op)
exp=' '.join([str(i) for i in data])
return json_response({"num":exp, "res":ans}, status_code=200)
with open("digit_image.jpg",'wb')as f:
f.write(base64.b64decode(request.json["image"].split(',')[1]))
number=xtract_number()
data.append(number)
data.append(op)
exp=' '.join([str(i) for i in data])
if op=='=':
data=data[:-1]
ans=solve()
return json_response({"num":exp, "res":ans}, status_code=200)
def solve():
global data
print(data)
total=data[0]
for index in range(1,len(data),2):
op=data[index]
if op=='+':
total+=data[index+1]
elif op=='-':
total-=data[index+1]
elif op=='*':
total*=data[index+1]
elif op=='/':
total/=data[index+1]
data=list()
print("Total= ", total)
return total
if __name__ == "__main__":
app.secret_key = os.urandom(12)
app.run(debug=False, host='0.0.0.0', port=31456)
| 3,212
| 0
| 212
|
7d115feb5a97142ac2b56cded232bdf84bcc2ff0
| 2,317
|
py
|
Python
|
src/lamplib/src/genny/tasks/genny_runner.py
|
jordist/genny
|
6a8cdbb98eea3c35b2a67e3627bece5d1efee733
|
[
"Apache-2.0"
] | 30
|
2019-01-30T17:21:44.000Z
|
2022-01-21T00:05:33.000Z
|
src/lamplib/src/genny/tasks/genny_runner.py
|
jordist/genny
|
6a8cdbb98eea3c35b2a67e3627bece5d1efee733
|
[
"Apache-2.0"
] | 358
|
2019-01-15T21:51:57.000Z
|
2022-03-30T16:10:42.000Z
|
src/lamplib/src/genny/tasks/genny_runner.py
|
jordist/genny
|
6a8cdbb98eea3c35b2a67e3627bece5d1efee733
|
[
"Apache-2.0"
] | 50
|
2019-01-15T20:01:15.000Z
|
2022-03-24T16:19:52.000Z
|
from typing import List
import structlog
import tempfile
import shutil
import os
from genny.cmd_runner import run_command
from genny.curator import poplar_grpc
from genny.tasks import preprocess
SLOG = structlog.get_logger(__name__)
def main_genny_runner(
genny_args: List[str],
genny_repo_root: str,
cleanup_metrics: bool,
workspace_root: str,
hang: bool = False,
):
"""
Intended to be the main entry point for running Genny.
"""
with poplar_grpc(
cleanup_metrics=cleanup_metrics,
workspace_root=workspace_root,
genny_repo_root=genny_repo_root,
):
path = os.path.join(genny_repo_root, "dist", "bin", "genny_core")
if not os.path.exists(path):
SLOG.error("genny_core not found. Run install first.", path=path)
raise Exception(f"genny_core not found at {path}.")
cmd = [path, *genny_args]
preprocessed_dir = os.path.join(workspace_root, "build/WorkloadOutput/workload")
os.makedirs(preprocessed_dir, exist_ok=True)
# Intercept the workload given to the core binary.
index = -1
if "-w" in cmd:
index = cmd.index("-w") + 1
elif "--workload-file" in cmd:
index = cmd.index("--workload-file") + 1
elif "dry-run" in cmd:
index = cmd.index("dry-run") + 1
if index >= 0:
workload_path = cmd[index]
smoke = "-s" in cmd or "--smoke-test" in cmd
temp_workload = os.path.join(preprocessed_dir, os.path.basename(workload_path))
with open(temp_workload, "w") as f:
preprocess.preprocess(workload_path=workload_path, smoke=smoke, output_file=f)
cmd[index] = temp_workload
if hang:
import time
import shlex
SLOG.info(
"Debug mode. Poplar is running. "
"Start genny_core (./build/src/driver/genny_core or ./dist/bin/genny_core) "
"on your own with the fully processed workload file."
f"\n\n {shlex.join(cmd)}\n\n"
"Ctrl+C here when done."
)
while True:
time.sleep(10)
run_command(
cmd=cmd, capture=False, check=True, cwd=workspace_root,
)
| 31.310811
| 94
| 0.596893
|
from typing import List
import structlog
import tempfile
import shutil
import os
from genny.cmd_runner import run_command
from genny.curator import poplar_grpc
from genny.tasks import preprocess
SLOG = structlog.get_logger(__name__)
def main_genny_runner(
genny_args: List[str],
genny_repo_root: str,
cleanup_metrics: bool,
workspace_root: str,
hang: bool = False,
):
"""
Intended to be the main entry point for running Genny.
"""
with poplar_grpc(
cleanup_metrics=cleanup_metrics,
workspace_root=workspace_root,
genny_repo_root=genny_repo_root,
):
path = os.path.join(genny_repo_root, "dist", "bin", "genny_core")
if not os.path.exists(path):
SLOG.error("genny_core not found. Run install first.", path=path)
raise Exception(f"genny_core not found at {path}.")
cmd = [path, *genny_args]
preprocessed_dir = os.path.join(workspace_root, "build/WorkloadOutput/workload")
os.makedirs(preprocessed_dir, exist_ok=True)
# Intercept the workload given to the core binary.
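        # index ends up pointing at the value that follows the workload flag (or the
        # dry-run subcommand), so the path there can be swapped for the preprocessed file.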
index = -1
if "-w" in cmd:
index = cmd.index("-w") + 1
elif "--workload-file" in cmd:
index = cmd.index("--workload-file") + 1
elif "dry-run" in cmd:
index = cmd.index("dry-run") + 1
if index >= 0:
workload_path = cmd[index]
smoke = "-s" in cmd or "--smoke-test" in cmd
temp_workload = os.path.join(preprocessed_dir, os.path.basename(workload_path))
with open(temp_workload, "w") as f:
preprocess.preprocess(workload_path=workload_path, smoke=smoke, output_file=f)
cmd[index] = temp_workload
if hang:
import time
import shlex
SLOG.info(
"Debug mode. Poplar is running. "
"Start genny_core (./build/src/driver/genny_core or ./dist/bin/genny_core) "
"on your own with the fully processed workload file."
f"\n\n {shlex.join(cmd)}\n\n"
"Ctrl+C here when done."
)
while True:
time.sleep(10)
run_command(
cmd=cmd, capture=False, check=True, cwd=workspace_root,
)
| 0
| 0
| 0
|
1626418bf281d9d52bb39d181c8430b2beca0bad
| 2,374
|
py
|
Python
|
telesurvideos/cms_plugins.py
|
dreglad/telesurvideos
|
626ac2e4e1a158b50e847fe6753485f1d9c72fcf
|
[
"MIT"
] | null | null | null |
telesurvideos/cms_plugins.py
|
dreglad/telesurvideos
|
626ac2e4e1a158b50e847fe6753485f1d9c72fcf
|
[
"MIT"
] | null | null | null |
telesurvideos/cms_plugins.py
|
dreglad/telesurvideos
|
626ac2e4e1a158b50e847fe6753485f1d9c72fcf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*- #
"""telesurvideos CMS plugins"""
from __future__ import unicode_literals
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext_lazy as _
from .models import ProgramaListPluginModel, VideoListPluginModel
class ProgramaListaPlugin(CMSPluginBase):
"""Clips lista plugin class"""
model = ProgramaListPluginModel
render_template = "clips/programa_list.html"
name = _("Listado de programas teleSUR")
module = _("teleSUR Videos")
page_only = True
admin_preview = True
cache = False
plugin_pool.register_plugin(ProgramaListaPlugin)
class ClipsListaPlugin(CMSPluginBase):
"""Cliplista plugin class"""
model = VideoListPluginModel
render_template = "clips/clip_list.html"
name = _("Listado de videos teleSUR")
module = _("teleSUR Videos")
page_only = True
admin_preview = True
cache = False
fieldsets = (
(None, {
'fields': ('titulo',),
}),
(_('Opciones de despliegue'), {
'classes': ('wide',),
'fields': (
'mostrar_titulos', 'mostrar_descripciones', 'mostrar_tags', 'mostrar_fecha',
'mostrar_banner', 'layout', 'mostrar_mas',
),
}),
        (_('Contenido específico'), {
'classes': ('wide',),
'fields': ('elementos',),
}),
(_('Filtrar por selección del editor'), {
'classes': ('collapse',),
'fields': ('seleccionados',),
}),
(_('Filtrar por tipo'), {
'classes': ('collapse',),
'fields': ('tipos',),
}),
(_('Filtrar por programa'), {
'classes': ('collapse',),
'fields': ('programas',),
}),
(_('Filtrar por categoria'), {
'classes': ('collapse',),
'fields': ('categorias',),
}),
(_('Filtrar por corresponsal'), {
'classes': ('collapse',),
'fields': ('corresponsales',),
}),
(_('Filtrar por tema'), {
'classes': ('collapse',),
'fields': ('temas',),
}),
(_('Filtrar por serie'), {
'classes': ('collapse',),
'fields': ('series',),
}),
)
plugin_pool.register_plugin(ClipsListaPlugin)
| 29.675
| 92
| 0.545914
|
# -*- coding: utf-8 -*- #
"""telesurvideos CMS plugins"""
from __future__ import unicode_literals
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext_lazy as _
from .models import ProgramaListPluginModel, VideoListPluginModel
class ProgramaListaPlugin(CMSPluginBase):
"""Clips lista plugin class"""
model = ProgramaListPluginModel
render_template = "clips/programa_list.html"
name = _("Listado de programas teleSUR")
module = _("teleSUR Videos")
page_only = True
admin_preview = True
cache = False
plugin_pool.register_plugin(ProgramaListaPlugin)
class ClipsListaPlugin(CMSPluginBase):
"""Cliplista plugin class"""
model = VideoListPluginModel
render_template = "clips/clip_list.html"
name = _("Listado de videos teleSUR")
module = _("teleSUR Videos")
page_only = True
admin_preview = True
cache = False
fieldsets = (
(None, {
'fields': ('titulo',),
}),
(_('Opciones de despliegue'), {
'classes': ('wide',),
'fields': (
'mostrar_titulos', 'mostrar_descripciones', 'mostrar_tags', 'mostrar_fecha',
'mostrar_banner', 'layout', 'mostrar_mas',
),
}),
        (_('Contenido específico'), {
'classes': ('wide',),
'fields': ('elementos',),
}),
(_('Filtrar por selección del editor'), {
'classes': ('collapse',),
'fields': ('seleccionados',),
}),
(_('Filtrar por tipo'), {
'classes': ('collapse',),
'fields': ('tipos',),
}),
(_('Filtrar por programa'), {
'classes': ('collapse',),
'fields': ('programas',),
}),
(_('Filtrar por categoria'), {
'classes': ('collapse',),
'fields': ('categorias',),
}),
(_('Filtrar por corresponsal'), {
'classes': ('collapse',),
'fields': ('corresponsales',),
}),
(_('Filtrar por tema'), {
'classes': ('collapse',),
'fields': ('temas',),
}),
(_('Filtrar por serie'), {
'classes': ('collapse',),
'fields': ('series',),
}),
)
plugin_pool.register_plugin(ClipsListaPlugin)
| 0
| 0
| 0
|
1c4e3d57f90816dbc19b8d30bb28fc13d0e022c9
| 2,793
|
py
|
Python
|
test_cmd_app/test_polishing.py
|
ntnguyen63/ares
|
a09ca91ed43007946f489c48b8c22f3a80d8a069
|
[
"MIT"
] | null | null | null |
test_cmd_app/test_polishing.py
|
ntnguyen63/ares
|
a09ca91ed43007946f489c48b8c22f3a80d8a069
|
[
"MIT"
] | null | null | null |
test_cmd_app/test_polishing.py
|
ntnguyen63/ares
|
a09ca91ed43007946f489c48b8c22f3a80d8a069
|
[
"MIT"
] | null | null | null |
import os
import shutil
import subprocess
from pathlib import Path
from ..lib.polisher import (
PolishPipeline,
create_sorted_aln,
pilon,
ShortReadPolishRunner,
)
draft = Path("test_data/drafts/assembly.fasta")
r1, r2 = ["test_data/r1.fastq", "test_data/r2.fastq"]
long_reads = "test_data/long.fastq"
busco_lineage = "fungi_odb10"
threads = 30
pipeline = PolishPipeline(
root_dir="polishing",
drafts=[Path(draft)],
long_reads=long_reads,
short_reads=[r1, r2],
threads=threads,
lineage=busco_lineage,
)
| 25.162162
| 65
| 0.694952
|
import os
import shutil
import subprocess
from pathlib import Path
from ..lib.polisher import (
PolishPipeline,
create_sorted_aln,
pilon,
ShortReadPolishRunner,
)
draft = Path("test_data/drafts/assembly.fasta")
r1, r2 = ["test_data/r1.fastq", "test_data/r2.fastq"]
long_reads = "test_data/long.fastq"
busco_lineage = "fungi_odb10"
threads = 30
pipeline = PolishPipeline(
root_dir="polishing",
drafts=[Path(draft)],
long_reads=long_reads,
short_reads=[r1, r2],
threads=threads,
lineage=busco_lineage,
)
def test_polish():
Path("polishing").mkdir(exist_ok=True)
busco_result = pipeline.polish(draft)
assert os.path.exists(busco_result.assembly)
assert busco_result.busco_score
assert busco_result.busco_path
def test_medaka_polish():
Path("polishing/assembly").mkdir(exist_ok=True, parents=True)
busco_result = pipeline.medaka_polish(
"polishing/assembly", draft.as_posix()
)
assert os.path.exists(busco_result.assembly)
assert busco_result.busco_path
assert busco_result.busco_score
def test_racon_polish():
Path("polishing/assembly").mkdir(exist_ok=True, parents=True)
busco_result = pipeline.racon_polish(
"polishing/assembly", draft.as_posix()
)
assert os.path.exists(busco_result.assembly)
assert busco_result.busco_path
assert busco_result.busco_score
def test_create_sorted_aln():
sorted_aln = create_sorted_aln(
assembly=draft,
r1=r1,
r2=r2,
threads=threads,
out="sorted_aln",
)
assert sorted_aln
assert sorted_aln.is_file()
def test_pilon_polish():
Path("polishing/assembly").mkdir(exist_ok=True, parents=True)
busco_result = pipeline.pilon_polish(
"polishing/assembly", draft.as_posix()
)
assert busco_result.assembly
assert busco_result.busco_score > 30
assert busco_result.busco_path
def test_pilon():
Path("polishing/assembly").mkdir(exist_ok=True, parents=True)
busco_result = pilon(
outdir="polishing/assembly",
draft=draft.as_posix(),
r1=r1,
r2=r2,
busco_lineage=busco_lineage,
threads=threads,
)
assert busco_result.assembly
assert busco_result.busco_score > 30
assert busco_result.busco_path
def test_short_read_polish_runner():
Path("polishing/assembly").mkdir(exist_ok=True, parents=True)
runner = ShortReadPolishRunner(
root_dir="polishing/assembly",
draft=draft.as_posix(),
r1=r1,
r2=r2,
lineage=busco_lineage,
threads=threads,
rounds=4,
)
best = runner.run(pilon)
assert best.assembly
assert best.busco_score
assert best.busco_path
assert os.path.exists("polishing/assembly/pilon")
| 2,079
| 0
| 161
|
30f427df8e935168a3306f6e515597316050143a
| 219
|
py
|
Python
|
Emmanuel ANENE/Phase 1/Python Basic 1/Day5/task7.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 6
|
2020-05-23T19:53:25.000Z
|
2021-05-08T20:21:30.000Z
|
Emmanuel ANENE/Phase 1/Python Basic 1/Day5/task7.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 8
|
2020-05-14T18:53:12.000Z
|
2020-07-03T00:06:20.000Z
|
Emmanuel ANENE/Phase 1/Python Basic 1/Day5/task7.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 39
|
2020-05-10T20:55:02.000Z
|
2020-09-12T17:40:59.000Z
|
fname = input("Enter your first name: ")
lname = input("Enter your last name: ")
age = int(input("Enter your age: "))
print('''First name: {0}
Last name: {1}
Age: {2}'''.format(fname.capitalize(), lname.upper(), age))
| 27.375
| 59
| 0.639269
|
fname = input("Enter your first name: ")
lname = input("Enter your last name: ")
age = int(input("Enter your age: "))
print('''First name: {0}
Last name: {1}
Age: {2}'''.format(fname.capitalize(), lname.upper(), age))
| 0
| 0
| 0
|
4802b03aa9efe968b78419bc86c831ac9abff6b0
| 2,104
|
py
|
Python
|
tests/b017.py
|
cricalix/flake8-bugbear
|
752a60df73f98e60231d4877e2a33fa3b9931ca1
|
[
"MIT"
] | null | null | null |
tests/b017.py
|
cricalix/flake8-bugbear
|
752a60df73f98e60231d4877e2a33fa3b9931ca1
|
[
"MIT"
] | null | null | null |
tests/b017.py
|
cricalix/flake8-bugbear
|
752a60df73f98e60231d4877e2a33fa3b9931ca1
|
[
"MIT"
] | null | null | null |
"""
Should emit:
B017 - on lines 15, 20, 25
All tests are valid unittest syntax, and will work if this code
is executed.
"""
import asyncio
import unittest
# None of these should trigger the With visitor.
CONSTANT = True
# This may trigger the visitor, but it shouldn't match the filters
with Foo() as a:
print(a)
if __name__ == "__main__":
unittest.main()
| 29.633803
| 75
| 0.672053
|
"""
Should emit:
B017 - on lines 15, 20, 25
All tests are valid unittest syntax, and will work if this code
is executed.
"""
import asyncio
import unittest
class AssertRaisesThatShouldTrigger(unittest.TestCase):
def test_bare_Exception(self) -> None:
"""The use of Exception like this will catch everything"""
with self.assertRaises(Exception):
print(k["evil"]) # NameError
def test_tuple_with_Exception(self) -> None:
"""The use of Exception in the tuple will still catch everything"""
with self.assertRaises((Exception, ValueError)):
print("I can't spell print", indent=1) # TypeError
def test_tuple_with_module_and_Exception(self) -> None:
"""The use of Exception in the tuple will still catch everything"""
with self.assertRaises((Exception, asyncio.CancelledError)):
self.bogus # AttributeError
class AssertRaisesThatShouldNotTrigger(unittest.TestCase):
def test_context_manager_raises(self) -> None:
"""A context manager being present means someone has probably
done a test afterwards; a python linter should have caught the
lack of use of 'ex' otherwise"""
with self.assertRaises(Exception) as ex:
raise ValueError("Context manager is good")
self.assertEqual("Context manager is good", str(ex.exception))
def test_raisesregex(self) -> None:
with self.assertRaisesRegex(Exception, "Regex is good"):
raise ValueError("Regex is good")
def test_raises_with_absolute_reference(self):
with self.assertRaises(asyncio.CancelledError):
raise asyncio.CancelledError()
# None of these should trigger the With visitor.
CONSTANT = True
def something_else() -> None:
for i in (1, 2, 3):
print(i)
class Foo:
def __enter__(self, *args, **kwargs) -> None:
yield
def __exit__(self, *args, **kwargs) -> None:
...
# This may trigger the visitor, but it shouldn't match the filters
with Foo() as a:
print(a)
if __name__ == "__main__":
unittest.main()
| 371
| 1,212
| 145
|
ac83689e53eaa3778b0752aa47228c1d884d7453
| 1,451
|
py
|
Python
|
setup.py
|
NiklasHoltmeyer/sentiment-analysis
|
705c3a5ebe75fd89b2fe9c38c9611cae3695c8db
|
[
"MIT"
] | null | null | null |
setup.py
|
NiklasHoltmeyer/sentiment-analysis
|
705c3a5ebe75fd89b2fe9c38c9611cae3695c8db
|
[
"MIT"
] | null | null | null |
setup.py
|
NiklasHoltmeyer/sentiment-analysis
|
705c3a5ebe75fd89b2fe9c38c9611cae3695c8db
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('LICENSE') as f:
license = f.read()
#with open('README.rst') as f:
#readme = f.read()
setup(
name='DeepSentiment',
version='0.1.0',
description='Deep Learning for Sentiment Analysis',
#long_description=readme,
author='Niklas Holtmeyer',
url='https://github.com/NiklasHoltmeyer/sentiment-analysis',
license=license,
scripts=["scripts/install_prerequisites.sh"],
packages=find_packages(exclude=('tests', 'docs')),
#dependency_links=[
#"https://download.pytorch.org/whl/torch_stable.html",
#],
install_requires=[
"tqdm",#==4.41.1
'pandas==1.1.5', #1.2.0
'matplotlib==3.3.3',
'chakin==0.0.8',
'contractions==0.0.43',
'emoji==0.6.0',
'nltk==3.5',
'scikit-learn==0.23.2',
'scipy==1.5.4',
'tensorboard==2.4.0',
'tensorboard-plugin-wit==1.7.0',
'tensorboardx==2.1',
'tensorflow',
'tensorflow-estimator',
'tensorflow-metadata==0.26.0',
'tfa-nightly==0.13.0.dev20201223200403',
'tfds-nightly==4.1.0.dev202012260107',
'tokenizers==0.9.4',
#'torch==1.6.0+cu101', #Cuda 10.1 <- google Colab
#'torchvision==0.7.0+cu101', #Cuda 10.1 <- colab
#'torch',
#'torchvision',
'transformers',
'simpletransformers',
],
)
| 28.45098
| 64
| 0.562371
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('LICENSE') as f:
license = f.read()
#with open('README.rst') as f:
#readme = f.read()
setup(
name='DeepSentiment',
version='0.1.0',
description='Deep Learning for Sentiment Analysis',
#long_description=readme,
author='Niklas Holtmeyer',
url='https://github.com/NiklasHoltmeyer/sentiment-analysis',
license=license,
scripts=["scripts/install_prerequisites.sh"],
packages=find_packages(exclude=('tests', 'docs')),
#dependency_links=[
#"https://download.pytorch.org/whl/torch_stable.html",
#],
install_requires=[
"tqdm",#==4.41.1
'pandas==1.1.5', #1.2.0
'matplotlib==3.3.3',
'chakin==0.0.8',
'contractions==0.0.43',
'emoji==0.6.0',
'nltk==3.5',
'scikit-learn==0.23.2',
'scipy==1.5.4',
'tensorboard==2.4.0',
'tensorboard-plugin-wit==1.7.0',
'tensorboardx==2.1',
'tensorflow',
'tensorflow-estimator',
'tensorflow-metadata==0.26.0',
'tfa-nightly==0.13.0.dev20201223200403',
'tfds-nightly==4.1.0.dev202012260107',
'tokenizers==0.9.4',
#'torch==1.6.0+cu101', #Cuda 10.1 <- google Colab
#'torchvision==0.7.0+cu101', #Cuda 10.1 <- colab
#'torch',
#'torchvision',
'transformers',
'simpletransformers',
],
)
| 0
| 0
| 0
|
b45d40200f29a502282da356feff14ba69526f79
| 435
|
py
|
Python
|
dash/admin.py
|
CMPUT404W17T00/CMPUT404-project
|
870e1844885906f8b48eac9a888e4874d28b3745
|
[
"Apache-2.0"
] | 2
|
2017-03-02T01:46:36.000Z
|
2018-11-01T20:36:58.000Z
|
dash/admin.py
|
CMPUT404W17T00/CMPUT404-project
|
870e1844885906f8b48eac9a888e4874d28b3745
|
[
"Apache-2.0"
] | 47
|
2017-01-23T00:00:17.000Z
|
2017-04-12T21:22:23.000Z
|
dash/admin.py
|
CMPUT404W17T00/CMPUT404-project
|
870e1844885906f8b48eac9a888e4874d28b3745
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import Post, Comment, Author, Category, CanSee, FriendRequest, \
Follow, RemoteCommentAuthor
# Register your models here.
admin.site.register(Author)
admin.site.register(RemoteCommentAuthor)
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(Category)
admin.site.register(CanSee)
admin.site.register(FriendRequest)
admin.site.register(Follow)
| 29
| 77
| 0.781609
|
from django.contrib import admin
from .models import Post, Comment, Author, Category, CanSee, FriendRequest, \
Follow, RemoteCommentAuthor
# Register your models here.
admin.site.register(Author)
admin.site.register(RemoteCommentAuthor)
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(Category)
admin.site.register(CanSee)
admin.site.register(FriendRequest)
admin.site.register(Follow)
| 0
| 0
| 0
|
471a5abbb901cd7e4469624d81045db786180f14
| 143
|
py
|
Python
|
botaclan/player/__init__.py
|
bataclanofficial/botaclan
|
93f8ccab4f29c50a395a588b7779431eab8625e8
|
[
"Apache-2.0"
] | null | null | null |
botaclan/player/__init__.py
|
bataclanofficial/botaclan
|
93f8ccab4f29c50a395a588b7779431eab8625e8
|
[
"Apache-2.0"
] | null | null | null |
botaclan/player/__init__.py
|
bataclanofficial/botaclan
|
93f8ccab4f29c50a395a588b7779431eab8625e8
|
[
"Apache-2.0"
] | null | null | null |
# This package took a lot of inspiration and snippets from the following Gist
# https://gist.github.com/vbe0201/ade9b80f2d3b64643d854938d40a0a2d
| 47.666667
| 75
| 0.825175
|
# This package took a lot of inspiration and snippets from the following Gist
# https://gist.github.com/vbe0201/ade9b80f2d3b64643d854938d40a0a2d
| 0
| 0
| 0
|
e81dbd23afaf25d2cb8a21092aedb2236f38b279
| 17,573
|
py
|
Python
|
lib/matplotlib/tests/test_colors.py
|
yuvallanger/matplotlib
|
e0020d318a9a9685594c6bff4631f74599321459
|
[
"MIT",
"BSD-3-Clause"
] | 8
|
2017-04-11T08:55:30.000Z
|
2022-03-25T04:31:26.000Z
|
lib/matplotlib/tests/test_colors.py
|
epgauss/matplotlib
|
c9898ea9a30c67c579ab27cd61b68e2abae0fb0e
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
lib/matplotlib/tests/test_colors.py
|
epgauss/matplotlib
|
c9898ea9a30c67c579ab27cd61b68e2abae0fb0e
|
[
"MIT",
"BSD-3-Clause"
] | 14
|
2015-10-05T04:15:46.000Z
|
2020-06-11T18:06:02.000Z
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import itertools
from distutils.version import LooseVersion as V
from nose.tools import assert_raises
import numpy as np
from numpy.testing.utils import assert_array_equal, assert_array_almost_equal
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import matplotlib.cbook as cbook
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import (image_comparison,
cleanup, knownfailureif)
def test_colormap_endian():
"""
Github issue #1005: a bug in putmask caused erroneous
mapping of 1.0 when input from a non-native-byteorder
array.
"""
cmap = cm.get_cmap("jet")
# Test under, over, and invalid along with values 0 and 1.
a = [-0.5, 0, 0.5, 1, 1.5, np.nan]
for dt in ["f2", "f4", "f8"]:
anative = np.ma.masked_invalid(np.array(a, dtype=dt))
aforeign = anative.byteswap().newbyteorder()
#print(anative.dtype.isnative, aforeign.dtype.isnative)
assert_array_equal(cmap(anative), cmap(aforeign))
def test_BoundaryNorm():
"""
Github issue #1258: interpolation was failing with numpy
1.7 pre-release.
"""
# TODO: expand this into a more general test of BoundaryNorm.
boundaries = [0, 1.1, 2.2]
vals = [-1, 0, 2, 2.2, 4]
expected = [-1, 0, 2, 3, 3]
# ncolors != len(boundaries) - 1 triggers interpolation
ncolors = len(boundaries)
bn = mcolors.BoundaryNorm(boundaries, ncolors)
assert_array_equal(bn(vals), expected)
def test_LogNorm():
"""
LogNorm ignored clip, now it has the same
behavior as Normalize, e.g., values > vmax are bigger than 1
without clip, with clip they are 1.
"""
ln = mcolors.LogNorm(clip=True, vmax=5)
assert_array_equal(ln([1, 6]), [0, 1.0])
def test_SymLogNorm():
"""
Test SymLogNorm behavior
"""
norm = mcolors.SymLogNorm(3, vmax=5, linscale=1.2)
vals = np.array([-30, -1, 2, 6], dtype=np.float)
normed_vals = norm(vals)
expected = [0., 0.53980074, 0.826991, 1.02758204]
assert_array_almost_equal(normed_vals, expected)
_inverse_tester(norm, vals)
_scalar_tester(norm, vals)
_mask_tester(norm, vals)
# Ensure that specifying vmin returns the same result as above
norm = mcolors.SymLogNorm(3, vmin=-30, vmax=5, linscale=1.2)
normed_vals = norm(vals)
assert_array_almost_equal(normed_vals, expected)
def _inverse_tester(norm_instance, vals):
"""
Checks if the inverse of the given normalization is working.
"""
assert_array_almost_equal(norm_instance.inverse(norm_instance(vals)), vals)
def _scalar_tester(norm_instance, vals):
"""
Checks if scalars and arrays are handled the same way.
Tests only for float.
"""
scalar_result = [norm_instance(float(v)) for v in vals]
assert_array_almost_equal(scalar_result, norm_instance(vals))
def _mask_tester(norm_instance, vals):
"""
Checks mask handling
"""
masked_array = np.ma.array(vals)
masked_array[0] = np.ma.masked
assert_array_equal(masked_array.mask, norm_instance(masked_array).mask)
@image_comparison(baseline_images=['levels_and_colors'],
extensions=['png'])
@cleanup
@image_comparison(baseline_images=['light_source_shading_topo'],
extensions=['png'])
def test_light_source_topo_surface():
"""Shades a DEM using different v.e.'s and blend modes."""
fname = cbook.get_sample_data('jacksboro_fault_dem.npz', asfileobj=False)
dem = np.load(fname)
elev = dem['elevation']
# Get the true cellsize in meters for accurate vertical exaggeration
# Convert from decimal degrees to meters
dx, dy = dem['dx'], dem['dy']
dx = 111320.0 * dx * np.cos(dem['ymin'])
dy = 111320.0 * dy
dem.close()
ls = mcolors.LightSource(315, 45)
cmap = cm.gist_earth
fig, axes = plt.subplots(nrows=3, ncols=3)
for row, mode in zip(axes, ['hsv', 'overlay', 'soft']):
for ax, ve in zip(row, [0.1, 1, 10]):
rgb = ls.shade(elev, cmap, vert_exag=ve, dx=dx, dy=dy,
blend_mode=mode)
ax.imshow(rgb)
ax.set(xticks=[], yticks=[])
@knownfailureif(V(np.__version__) >= V('1.9.0'))
def test_light_source_shading_default():
"""Array comparison test for the default "hsv" blend mode. Ensure the
default result doesn't change without warning."""
y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j]
z = 10 * np.cos(x**2 + y**2)
cmap = plt.cm.copper
ls = mcolors.LightSource(315, 45)
rgb = ls.shade(z, cmap)
    # Result stored transposed and rounded for more compact display...
expect = np.array([[[0.87, 0.85, 0.90, 0.90, 0.82, 0.62, 0.34, 0.00],
[0.85, 0.94, 0.99, 1.00, 1.00, 0.96, 0.62, 0.17],
[0.90, 0.99, 1.00, 1.00, 1.00, 1.00, 0.71, 0.33],
[0.90, 1.00, 1.00, 1.00, 1.00, 0.98, 0.51, 0.29],
[0.82, 1.00, 1.00, 1.00, 1.00, 0.64, 0.25, 0.13],
[0.62, 0.96, 1.00, 0.98, 0.64, 0.22, 0.06, 0.03],
[0.34, 0.62, 0.71, 0.51, 0.25, 0.06, 0.00, 0.01],
[0.00, 0.17, 0.33, 0.29, 0.13, 0.03, 0.01, 0.00]],
[[0.87, 0.79, 0.83, 0.80, 0.66, 0.44, 0.23, 0.00],
[0.79, 0.88, 0.93, 0.92, 0.83, 0.66, 0.38, 0.10],
[0.83, 0.93, 0.99, 1.00, 0.92, 0.75, 0.40, 0.18],
[0.80, 0.92, 1.00, 0.99, 0.93, 0.75, 0.28, 0.14],
[0.66, 0.83, 0.92, 0.93, 0.87, 0.44, 0.12, 0.06],
[0.44, 0.66, 0.75, 0.75, 0.44, 0.12, 0.03, 0.01],
[0.23, 0.38, 0.40, 0.28, 0.12, 0.03, 0.00, 0.00],
[0.00, 0.10, 0.18, 0.14, 0.06, 0.01, 0.00, 0.00]],
[[0.87, 0.75, 0.78, 0.73, 0.55, 0.33, 0.16, 0.00],
[0.75, 0.85, 0.90, 0.86, 0.71, 0.48, 0.23, 0.05],
[0.78, 0.90, 0.98, 1.00, 0.82, 0.51, 0.21, 0.08],
[0.73, 0.86, 1.00, 0.97, 0.84, 0.47, 0.11, 0.05],
[0.55, 0.71, 0.82, 0.84, 0.71, 0.20, 0.03, 0.01],
[0.33, 0.48, 0.51, 0.47, 0.20, 0.02, 0.00, 0.00],
[0.16, 0.23, 0.21, 0.11, 0.03, 0.00, 0.00, 0.00],
[0.00, 0.05, 0.08, 0.05, 0.01, 0.00, 0.00, 0.00]],
[[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00]]]).T
assert_array_almost_equal(rgb, expect, decimal=2)
@knownfailureif(V(np.__version__) >= V('1.9.0') or
V(np.__version__) < V('1.7.0'))
def test_light_source_masked_shading():
"""Array comparison test for a surface with a masked portion. Ensures that
we don't wind up with "fringes" of odd colors around masked regions."""
y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j]
z = 10 * np.cos(x**2 + y**2)
z = np.ma.masked_greater(z, 9.9)
cmap = plt.cm.copper
ls = mcolors.LightSource(315, 45)
rgb = ls.shade(z, cmap)
    # Result stored transposed and rounded for more compact display...
expect = np.array([[[1.00, 0.95, 0.96, 0.94, 0.86, 0.67, 0.40, 0.03],
[0.95, 0.99, 1.00, 1.00, 1.00, 0.98, 0.67, 0.19],
[0.96, 1.00, 1.00, 1.00, 1.00, 1.00, 0.78, 0.36],
[0.94, 1.00, 1.00, 0.00, 0.00, 1.00, 0.55, 0.32],
[0.86, 1.00, 1.00, 0.00, 0.00, 1.00, 0.27, 0.14],
[0.67, 0.98, 1.00, 1.00, 1.00, 1.00, 0.07, 0.03],
[0.40, 0.67, 0.78, 0.55, 0.27, 0.07, 0.00, 0.01],
[0.03, 0.19, 0.36, 0.32, 0.14, 0.03, 0.01, 0.00]],
[[1.00, 0.93, 0.93, 0.88, 0.72, 0.50, 0.28, 0.03],
[0.93, 0.97, 0.99, 0.96, 0.87, 0.70, 0.42, 0.11],
[0.93, 0.99, 0.74, 0.78, 0.78, 0.74, 0.45, 0.20],
[0.88, 0.96, 0.78, 0.00, 0.00, 0.78, 0.32, 0.16],
[0.72, 0.87, 0.78, 0.00, 0.00, 0.78, 0.14, 0.06],
[0.50, 0.70, 0.74, 0.78, 0.78, 0.74, 0.03, 0.01],
[0.28, 0.42, 0.45, 0.32, 0.14, 0.03, 0.00, 0.00],
[0.03, 0.11, 0.20, 0.16, 0.06, 0.01, 0.00, 0.00]],
[[1.00, 0.91, 0.91, 0.84, 0.64, 0.39, 0.21, 0.03],
[0.91, 0.96, 0.98, 0.93, 0.77, 0.53, 0.27, 0.06],
[0.91, 0.98, 0.47, 0.50, 0.50, 0.47, 0.25, 0.10],
[0.84, 0.93, 0.50, 0.00, 0.00, 0.50, 0.13, 0.06],
[0.64, 0.77, 0.50, 0.00, 0.00, 0.50, 0.03, 0.01],
[0.39, 0.53, 0.47, 0.50, 0.50, 0.47, 0.00, 0.00],
[0.21, 0.27, 0.25, 0.13, 0.03, 0.00, 0.00, 0.00],
[0.03, 0.06, 0.10, 0.06, 0.01, 0.00, 0.00, 0.00]],
[[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00]]]).T
assert_array_almost_equal(rgb, expect, decimal=2)
def test_light_source_hillshading():
"""Compare the current hillshading method against one that should be
mathematically equivalent. Illuminates a cone from a range of angles."""
y, x = np.mgrid[5:0:-1, :5]
z = -np.hypot(x - x.mean(), y - y.mean())
for az, elev in itertools.product(range(0, 390, 30), range(0, 105, 15)):
ls = mcolors.LightSource(az, elev)
h1 = ls.hillshade(z)
h2 = alternative_hillshade(az, elev, z)
assert_array_almost_equal(h1, h2)
def test_light_source_planar_hillshading():
"""Ensure that the illumination intensity is correct for planar
surfaces."""
def plane(azimuth, elevation, x, y):
"""Create a plane whose normal vector is at the given azimuth and
elevation."""
theta, phi = _azimuth2math(azimuth, elevation)
a, b, c = _sph2cart(theta, phi)
z = -(a*x + b*y) / c
return z
def angled_plane(azimuth, elevation, angle, x, y):
"""Create a plane whose normal vector is at an angle from the given
azimuth and elevation."""
elevation = elevation + angle
if elevation > 90:
azimuth = (azimuth + 180) % 360
elevation = (90 - elevation) % 90
return plane(azimuth, elevation, x, y)
y, x = np.mgrid[5:0:-1, :5]
for az, elev in itertools.product(range(0, 390, 30), range(0, 105, 15)):
ls = mcolors.LightSource(az, elev)
# Make a plane at a range of angles to the illumination
for angle in range(0, 105, 15):
z = angled_plane(az, elev, angle, x, y)
h = ls.hillshade(z)
assert_array_almost_equal(h, np.cos(np.radians(angle)))
def _azimuth2math(azimuth, elevation):
"""Converts from clockwise-from-north and up-from-horizontal to
mathematical conventions."""
theta = np.radians((90 - azimuth) % 360)
phi = np.radians(90 - elevation)
return theta, phi
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| 39.051111
| 79
| 0.521937
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import itertools
from distutils.version import LooseVersion as V
from nose.tools import assert_raises
import numpy as np
from numpy.testing.utils import assert_array_equal, assert_array_almost_equal
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import matplotlib.cbook as cbook
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import (image_comparison,
cleanup, knownfailureif)
def test_colormap_endian():
"""
Github issue #1005: a bug in putmask caused erroneous
mapping of 1.0 when input from a non-native-byteorder
array.
"""
cmap = cm.get_cmap("jet")
# Test under, over, and invalid along with values 0 and 1.
a = [-0.5, 0, 0.5, 1, 1.5, np.nan]
for dt in ["f2", "f4", "f8"]:
anative = np.ma.masked_invalid(np.array(a, dtype=dt))
aforeign = anative.byteswap().newbyteorder()
#print(anative.dtype.isnative, aforeign.dtype.isnative)
assert_array_equal(cmap(anative), cmap(aforeign))
def test_BoundaryNorm():
"""
Github issue #1258: interpolation was failing with numpy
1.7 pre-release.
"""
# TODO: expand this into a more general test of BoundaryNorm.
boundaries = [0, 1.1, 2.2]
vals = [-1, 0, 2, 2.2, 4]
expected = [-1, 0, 2, 3, 3]
# ncolors != len(boundaries) - 1 triggers interpolation
ncolors = len(boundaries)
bn = mcolors.BoundaryNorm(boundaries, ncolors)
assert_array_equal(bn(vals), expected)
def test_LogNorm():
"""
LogNorm ignored clip, now it has the same
behavior as Normalize, e.g., values > vmax are bigger than 1
without clip, with clip they are 1.
"""
ln = mcolors.LogNorm(clip=True, vmax=5)
assert_array_equal(ln([1, 6]), [0, 1.0])
def test_PowerNorm():
a = np.array([0, 0.5, 1, 1.5], dtype=np.float)
pnorm = mcolors.PowerNorm(1)
norm = mcolors.Normalize()
assert_array_almost_equal(norm(a), pnorm(a))
a = np.array([-0.5, 0, 2, 4, 8], dtype=np.float)
expected = [0, 0, 1./16, 1./4, 1]
pnorm = mcolors.PowerNorm(2, vmin=0, vmax=8)
assert_array_almost_equal(pnorm(a), expected)
assert_array_almost_equal(a[1:], pnorm.inverse(pnorm(a))[1:])
def test_Normalize():
norm = mcolors.Normalize()
vals = np.arange(-10, 10, 1, dtype=np.float)
_inverse_tester(norm, vals)
_scalar_tester(norm, vals)
_mask_tester(norm, vals)
def test_SymLogNorm():
"""
Test SymLogNorm behavior
"""
norm = mcolors.SymLogNorm(3, vmax=5, linscale=1.2)
vals = np.array([-30, -1, 2, 6], dtype=np.float)
normed_vals = norm(vals)
expected = [0., 0.53980074, 0.826991, 1.02758204]
assert_array_almost_equal(normed_vals, expected)
_inverse_tester(norm, vals)
_scalar_tester(norm, vals)
_mask_tester(norm, vals)
# Ensure that specifying vmin returns the same result as above
norm = mcolors.SymLogNorm(3, vmin=-30, vmax=5, linscale=1.2)
normed_vals = norm(vals)
assert_array_almost_equal(normed_vals, expected)
def _inverse_tester(norm_instance, vals):
"""
Checks if the inverse of the given normalization is working.
"""
assert_array_almost_equal(norm_instance.inverse(norm_instance(vals)), vals)
def _scalar_tester(norm_instance, vals):
"""
Checks if scalars and arrays are handled the same way.
Tests only for float.
"""
scalar_result = [norm_instance(float(v)) for v in vals]
assert_array_almost_equal(scalar_result, norm_instance(vals))
def _mask_tester(norm_instance, vals):
"""
Checks mask handling
"""
masked_array = np.ma.array(vals)
masked_array[0] = np.ma.masked
assert_array_equal(masked_array.mask, norm_instance(masked_array).mask)
@image_comparison(baseline_images=['levels_and_colors'],
extensions=['png'])
def test_cmap_and_norm_from_levels_and_colors():
data = np.linspace(-2, 4, 49).reshape(7, 7)
levels = [-1, 2, 2.5, 3]
colors = ['red', 'green', 'blue', 'yellow', 'black']
extend = 'both'
cmap, norm = mcolors.from_levels_and_colors(levels, colors, extend=extend)
ax = plt.axes()
m = plt.pcolormesh(data, cmap=cmap, norm=norm)
plt.colorbar(m)
# Hide the axes labels (but not the colorbar ones, as they are useful)
for lab in ax.get_xticklabels() + ax.get_yticklabels():
lab.set_visible(False)
def test_cmap_and_norm_from_levels_and_colors2():
levels = [-1, 2, 2.5, 3]
colors = ['red', (0, 1, 0), 'blue', (0.5, 0.5, 0.5), (0.0, 0.0, 0.0, 1.0)]
clr = mcolors.colorConverter.to_rgba_array(colors)
bad = (0.1, 0.1, 0.1, 0.1)
no_color = (0.0, 0.0, 0.0, 0.0)
masked_value = 'masked_value'
# Define the test values which are of interest.
# Note: levels are lev[i] <= v < lev[i+1]
tests = [('both', None, {-2: clr[0],
-1: clr[1],
2: clr[2],
2.25: clr[2],
3: clr[4],
3.5: clr[4],
masked_value: bad}),
('min', -1, {-2: clr[0],
-1: clr[1],
2: clr[2],
2.25: clr[2],
3: no_color,
3.5: no_color,
masked_value: bad}),
('max', -1, {-2: no_color,
-1: clr[0],
2: clr[1],
2.25: clr[1],
3: clr[3],
3.5: clr[3],
masked_value: bad}),
('neither', -2, {-2: no_color,
-1: clr[0],
2: clr[1],
2.25: clr[1],
3: no_color,
3.5: no_color,
masked_value: bad}),
]
for extend, i1, cases in tests:
cmap, norm = mcolors.from_levels_and_colors(levels, colors[0:i1],
extend=extend)
cmap.set_bad(bad)
for d_val, expected_color in cases.items():
if d_val == masked_value:
d_val = np.ma.array([1], mask=True)
else:
d_val = [d_val]
assert_array_equal(expected_color, cmap(norm(d_val))[0],
'Wih extend={0!r} and data '
'value={1!r}'.format(extend, d_val))
assert_raises(ValueError, mcolors.from_levels_and_colors, levels, colors)
def test_rgb_hsv_round_trip():
for a_shape in [(500, 500, 3), (500, 3), (1, 3), (3,)]:
np.random.seed(0)
tt = np.random.random(a_shape)
assert_array_almost_equal(tt,
mcolors.hsv_to_rgb(mcolors.rgb_to_hsv(tt)))
assert_array_almost_equal(tt,
mcolors.rgb_to_hsv(mcolors.hsv_to_rgb(tt)))
@cleanup
def test_autoscale_masked():
# Test for #2336. Previously fully masked data would trigger a ValueError.
data = np.ma.masked_all((12, 20))
plt.pcolor(data)
plt.draw()
def test_colors_no_float():
# Gray must be a string to distinguish 3-4 grays from RGB or RGBA.
def gray_from_float_rgb():
return mcolors.colorConverter.to_rgb(0.4)
def gray_from_float_rgba():
return mcolors.colorConverter.to_rgba(0.4)
assert_raises(ValueError, gray_from_float_rgb)
assert_raises(ValueError, gray_from_float_rgba)
@image_comparison(baseline_images=['light_source_shading_topo'],
extensions=['png'])
def test_light_source_topo_surface():
"""Shades a DEM using different v.e.'s and blend modes."""
fname = cbook.get_sample_data('jacksboro_fault_dem.npz', asfileobj=False)
dem = np.load(fname)
elev = dem['elevation']
# Get the true cellsize in meters for accurate vertical exaggeration
# Convert from decimal degrees to meters
dx, dy = dem['dx'], dem['dy']
dx = 111320.0 * dx * np.cos(dem['ymin'])
dy = 111320.0 * dy
dem.close()
ls = mcolors.LightSource(315, 45)
cmap = cm.gist_earth
fig, axes = plt.subplots(nrows=3, ncols=3)
for row, mode in zip(axes, ['hsv', 'overlay', 'soft']):
for ax, ve in zip(row, [0.1, 1, 10]):
rgb = ls.shade(elev, cmap, vert_exag=ve, dx=dx, dy=dy,
blend_mode=mode)
ax.imshow(rgb)
ax.set(xticks=[], yticks=[])
@knownfailureif(V(np.__version__) >= V('1.9.0'))
def test_light_source_shading_default():
"""Array comparison test for the default "hsv" blend mode. Ensure the
default result doesn't change without warning."""
y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j]
z = 10 * np.cos(x**2 + y**2)
cmap = plt.cm.copper
ls = mcolors.LightSource(315, 45)
rgb = ls.shade(z, cmap)
    # Result stored transposed and rounded for more compact display...
expect = np.array([[[0.87, 0.85, 0.90, 0.90, 0.82, 0.62, 0.34, 0.00],
[0.85, 0.94, 0.99, 1.00, 1.00, 0.96, 0.62, 0.17],
[0.90, 0.99, 1.00, 1.00, 1.00, 1.00, 0.71, 0.33],
[0.90, 1.00, 1.00, 1.00, 1.00, 0.98, 0.51, 0.29],
[0.82, 1.00, 1.00, 1.00, 1.00, 0.64, 0.25, 0.13],
[0.62, 0.96, 1.00, 0.98, 0.64, 0.22, 0.06, 0.03],
[0.34, 0.62, 0.71, 0.51, 0.25, 0.06, 0.00, 0.01],
[0.00, 0.17, 0.33, 0.29, 0.13, 0.03, 0.01, 0.00]],
[[0.87, 0.79, 0.83, 0.80, 0.66, 0.44, 0.23, 0.00],
[0.79, 0.88, 0.93, 0.92, 0.83, 0.66, 0.38, 0.10],
[0.83, 0.93, 0.99, 1.00, 0.92, 0.75, 0.40, 0.18],
[0.80, 0.92, 1.00, 0.99, 0.93, 0.75, 0.28, 0.14],
[0.66, 0.83, 0.92, 0.93, 0.87, 0.44, 0.12, 0.06],
[0.44, 0.66, 0.75, 0.75, 0.44, 0.12, 0.03, 0.01],
[0.23, 0.38, 0.40, 0.28, 0.12, 0.03, 0.00, 0.00],
[0.00, 0.10, 0.18, 0.14, 0.06, 0.01, 0.00, 0.00]],
[[0.87, 0.75, 0.78, 0.73, 0.55, 0.33, 0.16, 0.00],
[0.75, 0.85, 0.90, 0.86, 0.71, 0.48, 0.23, 0.05],
[0.78, 0.90, 0.98, 1.00, 0.82, 0.51, 0.21, 0.08],
[0.73, 0.86, 1.00, 0.97, 0.84, 0.47, 0.11, 0.05],
[0.55, 0.71, 0.82, 0.84, 0.71, 0.20, 0.03, 0.01],
[0.33, 0.48, 0.51, 0.47, 0.20, 0.02, 0.00, 0.00],
[0.16, 0.23, 0.21, 0.11, 0.03, 0.00, 0.00, 0.00],
[0.00, 0.05, 0.08, 0.05, 0.01, 0.00, 0.00, 0.00]],
[[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00]]]).T
assert_array_almost_equal(rgb, expect, decimal=2)
@knownfailureif(V(np.__version__) >= V('1.9.0') or
V(np.__version__) < V('1.7.0'))
def test_light_source_masked_shading():
"""Array comparison test for a surface with a masked portion. Ensures that
we don't wind up with "fringes" of odd colors around masked regions."""
y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j]
z = 10 * np.cos(x**2 + y**2)
z = np.ma.masked_greater(z, 9.9)
cmap = plt.cm.copper
ls = mcolors.LightSource(315, 45)
rgb = ls.shade(z, cmap)
    # Result stored transposed and rounded for more compact display...
expect = np.array([[[1.00, 0.95, 0.96, 0.94, 0.86, 0.67, 0.40, 0.03],
[0.95, 0.99, 1.00, 1.00, 1.00, 0.98, 0.67, 0.19],
[0.96, 1.00, 1.00, 1.00, 1.00, 1.00, 0.78, 0.36],
[0.94, 1.00, 1.00, 0.00, 0.00, 1.00, 0.55, 0.32],
[0.86, 1.00, 1.00, 0.00, 0.00, 1.00, 0.27, 0.14],
[0.67, 0.98, 1.00, 1.00, 1.00, 1.00, 0.07, 0.03],
[0.40, 0.67, 0.78, 0.55, 0.27, 0.07, 0.00, 0.01],
[0.03, 0.19, 0.36, 0.32, 0.14, 0.03, 0.01, 0.00]],
[[1.00, 0.93, 0.93, 0.88, 0.72, 0.50, 0.28, 0.03],
[0.93, 0.97, 0.99, 0.96, 0.87, 0.70, 0.42, 0.11],
[0.93, 0.99, 0.74, 0.78, 0.78, 0.74, 0.45, 0.20],
[0.88, 0.96, 0.78, 0.00, 0.00, 0.78, 0.32, 0.16],
[0.72, 0.87, 0.78, 0.00, 0.00, 0.78, 0.14, 0.06],
[0.50, 0.70, 0.74, 0.78, 0.78, 0.74, 0.03, 0.01],
[0.28, 0.42, 0.45, 0.32, 0.14, 0.03, 0.00, 0.00],
[0.03, 0.11, 0.20, 0.16, 0.06, 0.01, 0.00, 0.00]],
[[1.00, 0.91, 0.91, 0.84, 0.64, 0.39, 0.21, 0.03],
[0.91, 0.96, 0.98, 0.93, 0.77, 0.53, 0.27, 0.06],
[0.91, 0.98, 0.47, 0.50, 0.50, 0.47, 0.25, 0.10],
[0.84, 0.93, 0.50, 0.00, 0.00, 0.50, 0.13, 0.06],
[0.64, 0.77, 0.50, 0.00, 0.00, 0.50, 0.03, 0.01],
[0.39, 0.53, 0.47, 0.50, 0.50, 0.47, 0.00, 0.00],
[0.21, 0.27, 0.25, 0.13, 0.03, 0.00, 0.00, 0.00],
[0.03, 0.06, 0.10, 0.06, 0.01, 0.00, 0.00, 0.00]],
[[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00]]]).T
assert_array_almost_equal(rgb, expect, decimal=2)
def test_light_source_hillshading():
"""Compare the current hillshading method against one that should be
mathematically equivalent. Illuminates a cone from a range of angles."""
def alternative_hillshade(azimuth, elev, z):
illum = _sph2cart(*_azimuth2math(azimuth, elev))
illum = np.array(illum)
dy, dx = np.gradient(-z)
dy = -dy
dz = np.ones_like(dy)
normals = np.dstack([dx, dy, dz])
dividers = np.zeros_like(z)[..., None]
for i, mat in enumerate(normals):
for j, vec in enumerate(mat):
dividers[i, j, 0] = np.linalg.norm(vec)
normals /= dividers
# once we drop support for numpy 1.7.x the above can be written as
# normals /= np.linalg.norm(normals, axis=2)[..., None]
        # avoiding the double loop.
intensity = np.tensordot(normals, illum, axes=(2, 0))
intensity -= intensity.min()
intensity /= intensity.ptp()
return intensity
y, x = np.mgrid[5:0:-1, :5]
z = -np.hypot(x - x.mean(), y - y.mean())
for az, elev in itertools.product(range(0, 390, 30), range(0, 105, 15)):
ls = mcolors.LightSource(az, elev)
h1 = ls.hillshade(z)
h2 = alternative_hillshade(az, elev, z)
assert_array_almost_equal(h1, h2)
def test_light_source_planar_hillshading():
"""Ensure that the illumination intensity is correct for planar
surfaces."""
def plane(azimuth, elevation, x, y):
"""Create a plane whose normal vector is at the given azimuth and
elevation."""
theta, phi = _azimuth2math(azimuth, elevation)
a, b, c = _sph2cart(theta, phi)
z = -(a*x + b*y) / c
return z
def angled_plane(azimuth, elevation, angle, x, y):
"""Create a plane whose normal vector is at an angle from the given
azimuth and elevation."""
elevation = elevation + angle
if elevation > 90:
azimuth = (azimuth + 180) % 360
elevation = (90 - elevation) % 90
return plane(azimuth, elevation, x, y)
y, x = np.mgrid[5:0:-1, :5]
for az, elev in itertools.product(range(0, 390, 30), range(0, 105, 15)):
ls = mcolors.LightSource(az, elev)
# Make a plane at a range of angles to the illumination
for angle in range(0, 105, 15):
z = angled_plane(az, elev, angle, x, y)
h = ls.hillshade(z)
assert_array_almost_equal(h, np.cos(np.radians(angle)))
def _sph2cart(theta, phi):
x = np.cos(theta) * np.sin(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(phi)
return x, y, z
def _azimuth2math(azimuth, elevation):
"""Converts from clockwise-from-north and up-from-horizontal to
mathematical conventions."""
theta = np.radians((90 - azimuth) % 360)
phi = np.radians(90 - elevation)
return theta, phi
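# Illustrative check (not part of the original test module): _azimuth2math() followed
# by _sph2cart() should always give a unit-length illumination vector, which is what
# the hillshading comparisons above rely on. The angles here are arbitrary examples.
def _demo_unit_normal(azimuth=315.0, elevation=45.0):
    x, y, z = _sph2cart(*_azimuth2math(azimuth, elevation))
    assert abs(np.hypot(np.hypot(x, y), z) - 1.0) < 1e-12  # point on the unit sphere
    return x, y, z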
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| 5,089
| 0
| 209
|
e4ea4c4a24c17086538cd21bb2c6bfd5a1d14c23
| 1,958
|
py
|
Python
|
application/preprocessing/stopwords.py
|
danorel/CD-Inverted-Index
|
88a4eee855fe32fdb41602112ded24a66618431a
|
[
"MIT"
] | 1
|
2021-05-19T10:07:09.000Z
|
2021-05-19T10:07:09.000Z
|
application/preprocessing/stopwords.py
|
danorel/CD-Inverted-Index
|
88a4eee855fe32fdb41602112ded24a66618431a
|
[
"MIT"
] | null | null | null |
application/preprocessing/stopwords.py
|
danorel/CD-Inverted-Index
|
88a4eee855fe32fdb41602112ded24a66618431a
|
[
"MIT"
] | null | null | null |
import enum
| 26.106667
| 148
| 0.508682
|
import enum
class Languages(enum.Enum):
ukrainian = 1
class StopwordsCleaner:
@classmethod
def __choose_language(cls,
language) -> list:
"""
Choose the stop-words list for the given language
:type language: str
"""
# NOT DONE YET
if language == Languages.ukrainian.name:
return []
else:
from nltk.corpus import \
stopwords
return stopwords.words(language)
@classmethod
def __tokenize(cls,
input) -> list:
"""
Tokenization of input data
:type input: object
"""
from nltk.tokenize import \
word_tokenize
if isinstance(input, str):
tokens = word_tokenize(
text=input,
)
elif isinstance(input, list):
tokens = [
str(token)
for token in input[:]
]
else:
raise AttributeError(
"[ERROR]: Can't apply the tokenization for your input data. This operation could be applied only for input data type of list or str"
)
return tokens
@classmethod
def apply(cls,
input,
language) -> str:
"""
Apply stop-word removal to the input for the given language
:type input: object
:type language: str
"""
"""
Choose the stopwords vocabulary list, which contains all available stop-words of the entered language
"""
stopwords = cls.__choose_language(
language=language
)
"""
Remove stop-words from the tokens returned by self.__tokenize()
"""
cleaned = [
token
for token in cls.__tokenize(input)
if token not in stopwords
]
"""
Join the cleaned tokens back into a single text string
"""
return ' '.join(cleaned)
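# Minimal usage sketch (not part of the original module). It assumes the NLTK data
# needed by word_tokenize() and stopwords.words() has already been downloaded, e.g.
# via nltk.download('punkt') and nltk.download('stopwords').
if __name__ == "__main__":
    cleaned_text = StopwordsCleaner.apply(
        input="this is a small example sentence",
        language="english",
    )
    print(cleaned_text)  # stop-words such as "this", "is" and "a" are dropped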
| 0
| 1,898
| 46
|
4971af2ccef80f51f5261674f9c7be7869a55aa6
| 753
|
py
|
Python
|
certn-python/certn/api/auth.py
|
livebungalow/certn-python-public
|
aa411626a2918e37c3bbe26023b1b97014860414
|
[
"MIT"
] | null | null | null |
certn-python/certn/api/auth.py
|
livebungalow/certn-python-public
|
aa411626a2918e37c3bbe26023b1b97014860414
|
[
"MIT"
] | null | null | null |
certn-python/certn/api/auth.py
|
livebungalow/certn-python-public
|
aa411626a2918e37c3bbe26023b1b97014860414
|
[
"MIT"
] | 1
|
2019-07-04T00:19:15.000Z
|
2019-07-04T00:19:15.000Z
|
from certn.api.api import API
| 27.888889
| 89
| 0.612218
|
from certn.api.api import API
class Auth(API):
path = '/api/v1'
def login(self):
'''basic authentication returns the user id and token'''
response = self.client.post(
path=f'{self.path}/authenticate/',
data={'expires': None},
is_authenticated=False,
)
return response.get('user_id'), response.get('token')
def list(self):
'''List all logged in sessions'''
return self.client.get(path=f'{self.path}/authenticate/', is_authenticated=False)
def logout(self):
self.client.post(path=f'{self.path}/logout/', data=None, is_json=False)
def logout_all(self):
self.client.post(path=f'{self.path}/logoutall/', data=None, is_json=False)
| 159
| 540
| 23
|
0f358cfd4138a54754752990868e328e80ac937b
| 442
|
py
|
Python
|
examples/migrations/0005_advancedfields_file_field_two.py
|
sayanjap/DynamicForms
|
071707de36d109fe3a17ae5df239240ea5ba707f
|
[
"BSD-3-Clause"
] | 42
|
2018-01-18T14:50:05.000Z
|
2022-03-24T18:34:19.000Z
|
examples/migrations/0005_advancedfields_file_field_two.py
|
sayanjap/DynamicForms
|
071707de36d109fe3a17ae5df239240ea5ba707f
|
[
"BSD-3-Clause"
] | 14
|
2018-12-05T21:39:23.000Z
|
2022-02-27T06:43:48.000Z
|
examples/migrations/0005_advancedfields_file_field_two.py
|
sayanjap/DynamicForms
|
071707de36d109fe3a17ae5df239240ea5ba707f
|
[
"BSD-3-Clause"
] | 5
|
2018-01-18T16:32:20.000Z
|
2021-06-07T10:15:18.000Z
|
# Generated by Django 3.0.10 on 2020-09-15 07:07
from django.db import migrations, models
| 23.263158
| 82
| 0.628959
|
# Generated by Django 3.0.10 on 2020-09-15 07:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('examples', '0004_basicfields_password_field'),
]
operations = [
migrations.AddField(
model_name='advancedfields',
name='file_field_two',
field=models.FileField(blank=True, null=True, upload_to='examples2/'),
),
]
| 0
| 327
| 23
|
6ccf625c3351d592e09c1eca0c6a727ba7e80593
| 19,012
|
py
|
Python
|
website/ctrl/forum.py
|
Kapzy9r/wwmmo
|
67038053613d714a50653001ddd05021a13b6949
|
[
"MIT"
] | 1
|
2020-04-14T11:32:58.000Z
|
2020-04-14T11:32:58.000Z
|
website/ctrl/forum.py
|
Kapzy9r/wwmmo
|
67038053613d714a50653001ddd05021a13b6949
|
[
"MIT"
] | null | null | null |
website/ctrl/forum.py
|
Kapzy9r/wwmmo
|
67038053613d714a50653001ddd05021a13b6949
|
[
"MIT"
] | null | null | null |
import datetime
import re
import logging
import random
from google.appengine.api import memcache
from google.appengine.api import mail
from google.appengine.api import search
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import deferred
import ctrl
import ctrl.tmpl
import ctrl.profile
import model.forum
def getForums():
"""Returns all of the non-alliance-specific forums."""
keyname = "forums"
forums = memcache.get(keyname)
if not forums:
forums = []
for forum in model.forum.Forum.all():
if forum.alliance:
continue
forums.append(forum)
memcache.set(keyname, forums, time=3600)
return forums
def getAllianceForum(realm_name, alliance):
"""Returns the forum for the given alliance."""
keyname = "forums:alliance:%s-%d" % (realm_name, alliance.alliance_id)
forum = memcache.get(keyname)
if not forum:
for f in model.forum.Forum.all().filter("alliance", realm_name+":"+str(alliance.alliance_id)).fetch(1):
forum = f
break
if forum:
memcache.set(keyname, forum, time=3600)
if not forum:
# if there's not one, we'll create one
forum = model.forum.Forum(name=alliance.name, slug="alliance:"+realm_name.lower()+":"+ctrl.makeSlug(alliance.name),
description="Private alliance forum for "+alliance.name)
forum.alliance = realm_name+":"+str(alliance.alliance_id)
forum.put()
memcache.set(keyname, forum, time=3600)
return forum
def isModerator(forum, user):
"""Determines whether the given user is a moderator of this forum or not."""
if not forum.moderators:
return False
if not user:
return False
for mod in forum.moderators:
if mod.user_id() == user.user_id():
return True
return False
def canPost(forum, user):
"""Determines whether the given user can post to this forum or not."""
if not forum.allowed_posters:
return True
if not user:
return True
for poster in forum.allowed_posters:
if poster.user_id() == user.user_id():
return True
return False
def getTopThreadsPerForum(forums):
"""For each forum, returns the 'top' thread, which we'll display in the forum list page.
The 'top' thread is the most-recently created thread. When you click through to the forum, the
top thread will actually be the thread with the most recent reply, so it's slightly different."""
keynames = []
for forum in forums:
keynames.append("forum:%s:top-thread" % (forum.slug))
cache_mapping = memcache.get_multi(keynames)
# fetch any from the data store that weren't cached
for forum in forums:
keyname = "forum:%s:top-thread" % (forum.slug)
if keyname not in cache_mapping:
query = model.forum.ForumThread.all().filter("forum", forum).order("-posted").fetch(1)
for forum_thread in query:
cache_mapping[keyname] = forum_thread
break
memcache.set_multi(cache_mapping)
# convert from our (internal) memcache key names to a more reasonable key
top_threads = {}
for forum in forums:
keyname = "forum:%s:top-thread" % (forum.slug)
if keyname in cache_mapping:
top_threads[forum.slug] = cache_mapping[keyname]
return top_threads
def getPostsForUser(profile, include_alliance_posts, page_no, page_size):
"""Gets the posts that the given user has made.
Args:
profile: The profile of the user whose posts we're after.
include_alliance_posts: If True, we'll include posts the user has made to their alliance forum."""
if not profile.user:
# The user will need to log in to the website in order to set the user.
return []
user_id = profile.key().name()
keyname = 'forum:threads-per-user:%s:%d:%d:%d' % (str(user_id), int(include_alliance_posts), page_no, page_size)
posts = memcache.get(keyname)
if not posts:
query = model.forum.ForumPost.all().filter("user", profile.user)
if not include_alliance_posts and profile.alliance_id:
alliance = model.profile.Alliance.Fetch(profile.realm_name, profile.alliance_id)
if alliance:
alliance_forum = getAllianceForum(profile.realm_name, alliance)
query.filter("forum !=", alliance_forum).order("forum")
query = query.order("-posted")
if page_no == 0:
it = query.run(limit=page_size)
else:
cursor = ctrl.findCursor(query, "forum-posts-for-user:%s:%d" % (str(user_id), int(include_alliance_posts)), page_no, page_size)
it = query.with_cursor(cursor)
posts = []
for post in it:
posts.append(post)
memcache.set(keyname, posts)
return posts
def getLastPostsByForumThread(forum_threads):
"""For each thread in the given list, returns the most recent post in that thread."""
keynames = []
for forum_thread in forum_threads:
keynames.append("forum:%s:%s:last-post" % (forum_thread.forum.slug, forum_thread.slug))
cache_mapping = memcache.get_multi(keynames)
for forum_thread in forum_threads:
keyname = "forum:%s:%s:last-post" % (forum_thread.forum.slug, forum_thread.slug)
if keyname not in cache_mapping:
query = model.forum.ForumPost.all().ancestor(forum_thread).order("-posted").fetch(1)
for post in query:
cache_mapping[keyname] = post
break
memcache.set_multi(cache_mapping)
last_posts = {}
for forum_thread in forum_threads:
keyname = "forum:%s:%s:last-post" % (forum_thread.forum.slug, forum_thread.slug)
if keyname in cache_mapping:
last_posts[forum_thread.key()] = cache_mapping[keyname]
return last_posts
def getFirstPostsByForumThread(forum_threads):
"""For each thread in the given list, returns the first post in that thread (i.e. the one that was originally posted by the created of the thread)."""
keynames = []
for forum_thread in forum_threads:
keynames.append("forum:%s:%s:first-post" % (forum_thread.forum.slug, forum_thread.slug))
cache_mapping = memcache.get_multi(keynames)
for forum_thread in forum_threads:
keyname = "forum:%s:%s:first-post" % (forum_thread.forum.slug, forum_thread.slug)
if keyname not in cache_mapping:
query = model.forum.ForumPost.all().ancestor(forum_thread).order("posted").fetch(1)
for post in query:
cache_mapping[keyname] = post
break
memcache.set_multi(cache_mapping)
first_posts = {}
for forum_thread in forum_threads:
keyname = "forum:%s:%s:first-post" % (forum_thread.forum.slug, forum_thread.slug)
if keyname in cache_mapping:
first_posts[forum_thread.key()] = cache_mapping[keyname]
return first_posts
def getForumThreadPostCounts():
"""Helper method that returns a mapping of all the forum thread/post counts.
This is more efficient than calling getCount() for each one individually. It's
used by the "forum list" page to display the list of forums."""
keyname = "counter:forum-thread-post-counts"
counts = memcache.get(keyname)
if not counts:
counts = {}
for counter in (model.forum.ForumShardedCounter.all().filter("name >=", "forum")
.filter("name <", "forum\ufffd")):
first_colon = counter.name.find(":")
last_colon = counter.name.rfind(":")
parts = [counter.name[:first_colon], counter.name[first_colon+1:last_colon], counter.name[last_colon+1:]]
if parts[1] not in counts:
counts[parts[1]] = {}
if parts[2] not in counts[parts[1]]:
counts[parts[1]][parts[2]] = counter.count
else:
counts[parts[1]][parts[2]] += counter.count
memcache.set(keyname, counts, 3600)
return counts
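# Illustrative aside (not part of the original module): the batched read-through
# caching pattern that getForumThreadPostCounts() above (and the get_multi()-based
# helpers) rely on -- look everything up in one cache round-trip, fetch only the
# misses, then write those back. A plain dict stands in for memcache and a callable
# stands in for the datastore query; all names here are hypothetical.
def _demo_batched_cache(keys, cache, fetch_one):
    found = {k: cache[k] for k in keys if k in cache}  # like memcache.get_multi(keys)
    for key in keys:
        if key not in found:
            found[key] = fetch_one(key)  # cache miss: hit the "datastore"
            cache[key] = found[key]      # like memcache.set_multi(...)
    return found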
def getThreadPostCounts(forum_threads):
"""Helper for retrieving the post count of a list of threads.
This is more efficient than calling getCount() on each one individually."""
keynames = []
for forum_thread in forum_threads:
keynames.append("counter:thread:%s:%s:posts" % (forum_thread.forum.slug, forum_thread.slug))
counts = memcache.get_multi(keynames)
for forum_thread in forum_threads:
counter_name = "thread:%s:%s:posts" % (forum_thread.forum.slug, forum_thread.slug)
keyname = "counter:%s" % (counter_name)
if keyname not in counts:
count = 0
for counter in model.forum.ForumShardedCounter.all().filter("name", counter_name):
count += counter.count
counts[keyname] = count
memcache.set(keyname, count)
post_counts = {}
for forum_thread in forum_threads:
keyname = "counter:thread:%s:%s:posts" % (forum_thread.forum.slug, forum_thread.slug)
post_counts["%s:%s" % (forum_thread.forum.slug, forum_thread.slug)] = counts[keyname]
return post_counts
def getCount(counter_name):
"""Gets the value of the given counter.
For example, "forum:slug:posts" gives the number of posts in the forum with the slug
"slug". This uses the sharded counter to store the count more efficiently."""
keyname = "counter:%s" % (counter_name)
count = memcache.get(keyname)
if not count:
count = 0
for counter in model.forum.ForumShardedCounter.all().filter("name", counter_name):
count += counter.count
memcache.set(keyname, count)
return count
def incrCount(counter_name, num_shards=20, amount=1):
"""Increments the given counter by one.
See getCount() for example of the counter_names."""
db.run_in_transaction(_tx)
keyname = "counter:%s" % (counter_name)
memcache.incr(keyname)
def subscribeToThread(user, forum_thread):
"""Subscribes the given user to the given forum thread so that they recieve updates via email."""
# if they're already a subscriber, nothing to do!
keyname = "thread:%s:subscribers" % (forum_thread.key())
model_key_name = "%s:%s" % (user.user_id(), forum_thread.key())
subscribers = memcache.get(keyname)
if not subscribers:
thread_subscription = model.forum.ForumThreadSubscriber.get_by_key_name(model_key_name)
if thread_subscription:
return
elif user.user_id() in subscribers:
return
thread_subscription = model.forum.ForumThreadSubscriber(key_name=model_key_name,
user=user,
forum_thread=forum_thread,
subscribed=datetime.datetime.now())
thread_subscription.put()
# we manually re-cache the subscription with the new one, because it can take a while for
# datastore indexes to update, but we want it to be instant!
subscribers = getThreadSubscriptions(forum_thread, False)
if user.user_id() not in subscribers:
subscribers[user.user_id()] = thread_subscription
memcache.set(keyname, subscribers)
def unsubscribeFromThread(user, forum_thread):
"""Unsubscribes the given user from the given forum thread."""
keyname = "thread:%s:subscribers" % (forum_thread.key())
model_key_name = "%s:%s" % (user.user_id(), forum_thread.key())
thread_subscription = model.forum.ForumThreadSubscriber.get_by_key_name(model_key_name)
if not thread_subscription:
return
thread_subscription.delete()
# we manually re-cache the subscription with this one removed, because it can take a while for
# datastore indexes to update, but we want it to be instant!
subscribers = getThreadSubscriptions(forum_thread, False)
if user.user_id() in subscribers:
del subscribers[user.user_id()]
memcache.set(keyname, subscribers)
def getThreadSubscriptions(forum_thread, doset=True):
"""Gets the list of ForumThreadSubscribers who are subscribed to the given thread."""
keyname = "thread:%s:subscribers" % (forum_thread.key())
subscribers = memcache.get(keyname)
if subscribers is None: # note: empty list is OK, None is not...
subscribers = {}
query = model.forum.ForumThreadSubscriber.all().filter("forum_thread", forum_thread)
for subscriber in query:
subscribers[subscriber.user.user_id()] = subscriber
if doset:
memcache.set(keyname, subscribers)
return subscribers
def notifySubscribers(forum, forum_thread, forum_post, poster_user, poster_profile):
"""Sends an email notification to all subscribers of the given thread.
Arguments:
forum: The forum that was posted to.
forum_thread: The forum thread we posted to.
forum_post: The post that was just made
poster_user: The user who posted (we don't send notifications to this user).
poster_profile: The profile of the user who posted.
"""
subscriptions = getThreadSubscriptions(forum_thread)
tmpl = ctrl.tmpl.getTemplate("email/forum_notification.txt")
user_ids = []
for user_id, subscription in subscriptions.items():
user_ids.append(user_id)
profiles = ctrl.profile.getProfiles(user_ids)
for user_id, subscription in subscriptions.items():
if user_id == poster_user.user_id():
continue
body = ctrl.tmpl.render(tmpl, {"forum": forum, "forum_thread": forum_thread, "forum_post": forum_post,
"poster_user": poster_user, "poster_profile": poster_profile,
"profile": profiles[user_id]})
sender = "forums@warworldssite.appspotmail.com"
recipient = subscription.user.email()
if recipient:
logging.info("Sending email: {from:"+sender+", recipient:"+recipient+", subject:[war-worlds.com forums] "+
forum_thread.subject+", body:"+str(len(body))+" bytes")
mail.send_mail(sender, recipient, "[war-worlds.com forums] "+forum_thread.subject, body)
def _indexForumThread(forum_thread, new_forum_post = None):
"""Does the actual work of indexing the given forum thread. We expect to be called in a deferred handler."""
forum = forum_thread.forum
fields = [search.TextField(name="subject", value=forum_thread.subject),
search.DateField(name="posted", value=forum_thread.posted),
search.DateField(name="last_post", value=forum_thread.last_post),
search.AtomField(name="forum", value=forum.slug.replace(":", "_"))]
if forum.alliance:
fields.append(search.AtomField(name="alliance", value=forum.alliance.replace(":", "_")))
else:
fields.append(search.AtomField(name="alliance", value="NA"))
content = ""
for forum_post in model.forum.ForumPost.all().ancestor(forum_thread).order("posted"):
if new_forum_post and str(forum_post.key()) == str(new_forum_post.key()):
new_forum_post = None
content += "\r\n<hr />\r\n" + forum_post.content
if new_forum_post:
content = new_forum_post.content + content
fields.append(search.HtmlField(name="content", value=content))
doc = search.Document(
doc_id = str(forum_thread.key()),
fields = fields)
index = search.Index(name="forum")
index.put(doc)
def indexForumThread(forum_thread, forum_post = None):
"""Queues the given forum thread to be indexed."""
deferred.defer(_indexForumThread, forum_thread, forum_post, _queue="forumsync")
| 34.504537
| 152
| 0.688828
|
import datetime
import re
import logging
import random
from google.appengine.api import memcache
from google.appengine.api import mail
from google.appengine.api import search
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import deferred
import ctrl
import ctrl.tmpl
import ctrl.profile
import model.forum
def getForums():
"""Returns all of the non-alliance-specific forums."""
keyname = "forums"
forums = memcache.get(keyname)
if not forums:
forums = []
for forum in model.forum.Forum.all():
if forum.alliance:
continue
forums.append(forum)
memcache.set(keyname, forums, time=3600)
return forums
def canViewAllianceForum(forum, profile):
if not forum.alliance:
return True
if forum.alliance == profile.realm_name + ":" + str(profile.alliance_id):
return True
return False
def getAllianceForum(realm_name, alliance):
"""Returns the forum for the given alliance."""
keyname = "forums:alliance:%s-%d" % (realm_name, alliance.alliance_id)
forum = memcache.get(keyname)
if not forum:
for f in model.forum.Forum.all().filter("alliance", realm_name+":"+str(alliance.alliance_id)).fetch(1):
forum = f
break
if forum:
memcache.set(keyname, forum, time=3600)
if not forum:
# if there's not one, we'll create one
forum = model.forum.Forum(name=alliance.name, slug="alliance:"+realm_name.lower()+":"+ctrl.makeSlug(alliance.name),
description="Private alliance forum for "+alliance.name)
forum.alliance = realm_name+":"+str(alliance.alliance_id)
forum.put()
memcache.set(keyname, forum, time=3600)
return forum
def getForumBySlug(forum_slug):
keyname = "forums:%s" % (forum_slug)
forum = memcache.get(keyname)
if not forum:
for f in model.forum.Forum.all().filter("slug", forum_slug).fetch(1):
forum = f
break
if forum:
memcache.set(keyname, forum, time=3600)
return forum
def isModerator(forum, user):
"""Determines whether the given user is a moderator of this forum or not."""
if not forum.moderators:
return False
if not user:
return False
for mod in forum.moderators:
if mod.user_id() == user.user_id():
return True
return False
def canPost(forum, user):
"""Determines whether the given user can post to this forum or not."""
if not forum.allowed_posters:
return True
if not user:
return True
for poster in forum.allowed_posters:
if poster.user_id() == user.user_id():
return True
return False
def getThreads(forum, page_no, page_size):
keyname = 'forum:threads:%s:%d:%d' % (str(forum.key()), page_no, page_size)
threads = memcache.get(keyname)
if not threads:
query = model.forum.ForumThread.all().filter("forum", forum)
query = query.order("-last_post")
if page_no == 0:
it = query.run(limit=page_size)
else:
cursor = ctrl.findCursor(query, "forum-threads:%s" % (str(forum.key())), page_no, page_size)
it = query.with_cursor(cursor)
threads = []
n = 0
for thread in it:
n += 1
if thread.is_sticky:
# note: stickies will make the page slightly smaller, but it should
# only ever be one or two anyway...
continue
threads.append(thread)
if n >= page_size:
break
memcache.set(keyname, threads)
return threads
def searchThreads(forum, query, profile):
index = search.Index("forum")
if forum:
query = "forum:" + forum.slug.replace(":", "_") + " " + query
if not profile or not profile.alliance_id:
query = "alliance:NA " + query
else:
alliance = profile.realm_name+"_"+str(profile.alliance_id)
query = "(alliance:NA OR alliance:"+alliance+") " + query
logging.info("Query: "+query)
results = index.search(search.Query(query_string=query,
options=search.QueryOptions(ids_only=True)))
threads = []
for doc in results:
threads.append(model.forum.ForumThread.get(doc.doc_id))
return threads
def getStickyThreads(forum):
keyname = 'forum:sticky-threads:%s' % (str(forum.key()))
sticky_threads = memcache.get(keyname)
if not sticky_threads:
query = model.forum.ForumThread.all().filter("forum", forum)
query = query.filter("is_sticky", True)
query = query.order("-last_post")
it = query.run()
sticky_threads = []
for thread in it:
sticky_threads.append(thread)
memcache.set(keyname, sticky_threads)
return sticky_threads
def toggleSticky(forum, forum_thread):
forum_thread.is_sticky = not forum_thread.is_sticky
forum_thread.put()
memcache.flush_all()
def getTopThreadsPerForum(forums):
"""For each forum, returns the 'top' thread, which we'll display in the forum list page.
The 'top' thread is the most-recently created thread. When you click through to the forum, the
top thread will actually be the thread with the most recent reply, so it's slightly different."""
keynames = []
for forum in forums:
keynames.append("forum:%s:top-thread" % (forum.slug))
cache_mapping = memcache.get_multi(keynames)
# fetch any from the data store that weren't cached
for forum in forums:
keyname = "forum:%s:top-thread" % (forum.slug)
if keyname not in cache_mapping:
query = model.forum.ForumThread.all().filter("forum", forum).order("-posted").fetch(1)
for forum_thread in query:
cache_mapping[keyname] = forum_thread
break
memcache.set_multi(cache_mapping)
# convert from our (internal) memcache key names to a more reasonable key
top_threads = {}
for forum in forums:
keyname = "forum:%s:top-thread" % (forum.slug)
if keyname in cache_mapping:
top_threads[forum.slug] = cache_mapping[keyname]
return top_threads
def getPostsForUser(profile, include_alliance_posts, page_no, page_size):
"""Gets the posts that the given user has made.
Args:
profile: The profile of the user whose posts we're after.
include_alliance_posts: If True, we'll include posts the user has made to their alliance forum."""
if not profile.user:
# The user will need to log in to the website in order to set the user.
return []
user_id = profile.key().name()
keyname = 'forum:threads-per-user:%s:%d:%d:%d' % (str(user_id), int(include_alliance_posts), page_no, page_size)
posts = memcache.get(keyname)
if not posts:
query = model.forum.ForumPost.all().filter("user", profile.user)
if not include_alliance_posts and profile.alliance_id:
alliance = model.profile.Alliance.Fetch(profile.realm_name, profile.alliance_id)
if alliance:
alliance_forum = getAllianceForum(profile.realm_name, alliance)
query.filter("forum !=", alliance_forum).order("forum")
query = query.order("-posted")
if page_no == 0:
it = query.run(limit=page_size)
else:
cursor = ctrl.findCursor(query, "forum-posts-for-user:%s:%d" % (str(user_id), int(include_alliance_posts)), page_no, page_size)
it = query.with_cursor(cursor)
posts = []
for post in it:
posts.append(post)
memcache.set(keyname, posts)
return posts
def getLastPostsByForumThread(forum_threads):
"""For each thread in the given list, returns the most recent post in that thread."""
keynames = []
for forum_thread in forum_threads:
keynames.append("forum:%s:%s:last-post" % (forum_thread.forum.slug, forum_thread.slug))
cache_mapping = memcache.get_multi(keynames)
for forum_thread in forum_threads:
keyname = "forum:%s:%s:last-post" % (forum_thread.forum.slug, forum_thread.slug)
if keyname not in cache_mapping:
query = model.forum.ForumPost.all().ancestor(forum_thread).order("-posted").fetch(1)
for post in query:
cache_mapping[keyname] = post
break
memcache.set_multi(cache_mapping)
last_posts = {}
for forum_thread in forum_threads:
keyname = "forum:%s:%s:last-post" % (forum_thread.forum.slug, forum_thread.slug)
if keyname in cache_mapping:
last_posts[forum_thread.key()] = cache_mapping[keyname]
return last_posts
def getFirstPostsByForumThread(forum_threads):
"""For each thread in the given list, returns the first post in that thread (i.e. the one that was originally posted by the created of the thread)."""
keynames = []
for forum_thread in forum_threads:
keynames.append("forum:%s:%s:first-post" % (forum_thread.forum.slug, forum_thread.slug))
cache_mapping = memcache.get_multi(keynames)
for forum_thread in forum_threads:
keyname = "forum:%s:%s:first-post" % (forum_thread.forum.slug, forum_thread.slug)
if keyname not in cache_mapping:
query = model.forum.ForumPost.all().ancestor(forum_thread).order("posted").fetch(1)
for post in query:
cache_mapping[keyname] = post
break
memcache.set_multi(cache_mapping)
first_posts = {}
for forum_thread in forum_threads:
keyname = "forum:%s:%s:first-post" % (forum_thread.forum.slug, forum_thread.slug)
if keyname in cache_mapping:
first_posts[forum_thread.key()] = cache_mapping[keyname]
return first_posts
def getThreadBySlug(forum, forum_thread_slug):
keyname = "forum:thread:%s:%s" % (forum.slug, forum_thread_slug)
forum_thread = memcache.get(keyname)
if not forum_thread:
for ft in model.forum.ForumThread.all().filter("forum", forum).filter("slug", forum_thread_slug).fetch(1):
forum_thread = ft
break
if forum_thread:
memcache.set(keyname, forum_thread, time=3600)
return forum_thread
def getPosts(forum, forum_thread, page_no, page_size):
keyname = 'forum:posts:%s:%d:%d' % (str(forum_thread.key()), page_no, page_size)
posts = memcache.get(keyname)
if not posts:
query = model.forum.ForumPost.all().ancestor(forum_thread)
query = query.order("posted")
if page_no == 0:
it = query.run(limit=page_size)
else:
cursor = ctrl.findCursor(query, "forum-posts:%s" % (str(forum_thread.key())), page_no, page_size)
it = query.with_cursor(cursor)
posts = []
for post in it:
posts.append(post)
if len(posts) >= page_size:
break
memcache.set(keyname, posts)
return posts
def getForumThreadPostCounts():
"""Helper method that returns a mapping of all the forum thread/post counts.
This is more efficient than calling getCount() for each one individually. It's
used by the "forum list" page to display the list of forums."""
keyname = "counter:forum-thread-post-counts"
counts = memcache.get(keyname)
if not counts:
counts = {}
for counter in (model.forum.ForumShardedCounter.all().filter("name >=", "forum")
.filter("name <", "forum\ufffd")):
first_colon = counter.name.find(":")
last_colon = counter.name.rfind(":")
parts = [counter.name[:first_colon], counter.name[first_colon+1:last_colon], counter.name[last_colon+1:]]
if parts[1] not in counts:
counts[parts[1]] = {}
if parts[2] not in counts[parts[1]]:
counts[parts[1]][parts[2]] = counter.count
else:
counts[parts[1]][parts[2]] += counter.count
memcache.set(keyname, counts, 3600)
return counts
def getThreadPostCounts(forum_threads):
"""Helper for retrieving the post count of a list of threads.
This is more efficient than calling getCount() on each one individually."""
keynames = []
for forum_thread in forum_threads:
keynames.append("counter:thread:%s:%s:posts" % (forum_thread.forum.slug, forum_thread.slug))
counts = memcache.get_multi(keynames)
for forum_thread in forum_threads:
counter_name = "thread:%s:%s:posts" % (forum_thread.forum.slug, forum_thread.slug)
keyname = "counter:%s" % (counter_name)
if keyname not in counts:
count = 0
for counter in model.forum.ForumShardedCounter.all().filter("name", counter_name):
count += counter.count
counts[keyname] = count
memcache.set(keyname, count)
post_counts = {}
for forum_thread in forum_threads:
keyname = "counter:thread:%s:%s:posts" % (forum_thread.forum.slug, forum_thread.slug)
post_counts["%s:%s" % (forum_thread.forum.slug, forum_thread.slug)] = counts[keyname]
return post_counts
def getCount(counter_name):
"""Gets the value of the given counter.
For example, "forum:slug:posts" gives the number of posts in the forum with the slug
"slug". This uses the sharded counter to store the count more efficiently."""
keyname = "counter:%s" % (counter_name)
count = memcache.get(keyname)
if not count:
count = 0
for counter in model.forum.ForumShardedCounter.all().filter("name", counter_name):
count += counter.count
memcache.set(keyname, count)
return count
def incrCount(counter_name, num_shards=20, amount=1):
"""Increments the given counter by one.
See getCount() for example of the counter_names."""
def _tx():
if num_shards == 1:
index = 0
else:
index = random.randint(0, num_shards - 1)
shard_name = counter_name+":"+str(index)
counter = model.forum.ForumShardedCounter.get_by_key_name(shard_name)
if not counter:
counter = model.forum.ForumShardedCounter(key_name=shard_name,
name=counter_name)
counter.count += amount
counter.put()
db.run_in_transaction(_tx)
keyname = "counter:%s" % (counter_name)
memcache.incr(keyname)
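# Illustrative aside (not part of the original module): the sharded-counter idea
# behind getCount()/incrCount() above, sketched without App Engine. Writes bump a
# randomly chosen shard to avoid contention on a single entity; reads sum over all
# shards. The function below is hypothetical demo code only.
def _demo_sharded_counter(increments=100, num_shards=20):
    shards = [0] * num_shards
    for _ in range(increments):
        shards[random.randint(0, num_shards - 1)] += 1  # like incrCount(): bump one shard
    return sum(shards)                                  # like getCount(): sum every shard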
def subscribeToThread(user, forum_thread):
"""Subscribes the given user to the given forum thread so that they recieve updates via email."""
# if they're already a subscriber, nothing to do!
keyname = "thread:%s:subscribers" % (forum_thread.key())
model_key_name = "%s:%s" % (user.user_id(), forum_thread.key())
subscribers = memcache.get(keyname)
if not subscribers:
thread_subscription = model.forum.ForumThreadSubscriber.get_by_key_name(model_key_name)
if thread_subscription:
return
elif user.user_id() in subscribers:
return
thread_subscription = model.forum.ForumThreadSubscriber(key_name=model_key_name,
user=user,
forum_thread=forum_thread,
subscribed=datetime.datetime.now())
thread_subscription.put()
# we manually re-cache the subscription with the new one, because it can take a while for
# datastore indexes to update, but we want it to be instant!
subscribers = getThreadSubscriptions(forum_thread, False)
if user.user_id() not in subscribers:
subscribers[user.user_id()] = thread_subscription
memcache.set(keyname, subscribers)
def unsubscribeFromThread(user, forum_thread):
"""Unsubscribes the given user from the given forum thread."""
keyname = "thread:%s:subscribers" % (forum_thread.key())
model_key_name = "%s:%s" % (user.user_id(), forum_thread.key())
thread_subscription = model.forum.ForumThreadSubscriber.get_by_key_name(model_key_name)
if not thread_subscription:
return
thread_subscription.delete()
# we manually re-cache the subscription with this one removed, because it can take a while for
# datastore indexes to update, but we want it to be instant!
subscribers = getThreadSubscriptions(forum_thread, False)
if user.user_id() in subscribers:
del subscribers[user.user_id()]
memcache.set(keyname, subscribers)
def getThreadSubscriptions(forum_thread, doset=True):
"""Gets the list of ForumThreadSubscribers who are subscribed to the given thread."""
keyname = "thread:%s:subscribers" % (forum_thread.key())
subscribers = memcache.get(keyname)
if subscribers is None: # note: empty list is OK, None is not...
subscribers = {}
query = model.forum.ForumThreadSubscriber.all().filter("forum_thread", forum_thread)
for subscriber in query:
subscribers[subscriber.user.user_id()] = subscriber
if doset:
memcache.set(keyname, subscribers)
return subscribers
def notifySubscribers(forum, forum_thread, forum_post, poster_user, poster_profile):
"""Sends an email notification to all subscribers of the given thread.
Arguments:
forum: The forum that was posted to.
forum_thread: The forum thread we posted to.
forum_post: The post that was just made
poster_user: The user who posted (we don't send notifications to this user).
poster_profile: The profile of the user who posted.
"""
subscriptions = getThreadSubscriptions(forum_thread)
tmpl = ctrl.tmpl.getTemplate("email/forum_notification.txt")
user_ids = []
for user_id, subscription in subscriptions.items():
user_ids.append(user_id)
profiles = ctrl.profile.getProfiles(user_ids)
for user_id, subscription in subscriptions.items():
if user_id == poster_user.user_id():
continue
body = ctrl.tmpl.render(tmpl, {"forum": forum, "forum_thread": forum_thread, "forum_post": forum_post,
"poster_user": poster_user, "poster_profile": poster_profile,
"profile": profiles[user_id]})
sender = "forums@warworldssite.appspotmail.com"
recipient = subscription.user.email()
if recipient:
logging.info("Sending email: {from:"+sender+", recipient:"+recipient+", subject:[war-worlds.com forums] "+
forum_thread.subject+", body:"+str(len(body))+" bytes")
mail.send_mail(sender, recipient, "[war-worlds.com forums] "+forum_thread.subject, body)
def _indexForumThread(forum_thread, new_forum_post = None):
"""Does the actual work of indexing the given forum thread. We expect to be called in a deferred handler."""
forum = forum_thread.forum
fields = [search.TextField(name="subject", value=forum_thread.subject),
search.DateField(name="posted", value=forum_thread.posted),
search.DateField(name="last_post", value=forum_thread.last_post),
search.AtomField(name="forum", value=forum.slug.replace(":", "_"))]
if forum.alliance:
fields.append(search.AtomField(name="alliance", value=forum.alliance.replace(":", "_")))
else:
fields.append(search.AtomField(name="alliance", value="NA"))
content = ""
for forum_post in model.forum.ForumPost.all().ancestor(forum_thread).order("posted"):
if new_forum_post and str(forum_post.key()) == str(new_forum_post.key()):
new_forum_post = None
content += "\r\n<hr />\r\n" + forum_post.content
if new_forum_post:
content = new_forum_post.content + content
fields.append(search.HtmlField(name="content", value=content))
doc = search.Document(
doc_id = str(forum_thread.key()),
fields = fields)
index = search.Index(name="forum")
index.put(doc)
def indexForumThread(forum_thread, forum_post = None):
"""Queues the given forum thread to be indexed."""
deferred.defer(_indexForumThread, forum_thread, forum_post, _queue="forumsync")
| 3,860
| 0
| 208
|
cce48155feb12e9de28b4f26e2575e40dcc0f875
| 2,408
|
py
|
Python
|
modules/animals.py
|
jvicu2001/alexis-bot
|
6b10377e3ec61aaa6051220ae9684b2f8f73b36e
|
[
"MIT"
] | 7
|
2017-05-01T01:06:02.000Z
|
2017-09-07T18:58:37.000Z
|
modules/animals.py
|
jvicu2001/alexis-bot
|
6b10377e3ec61aaa6051220ae9684b2f8f73b36e
|
[
"MIT"
] | 67
|
2017-12-03T21:38:00.000Z
|
2022-03-11T23:14:11.000Z
|
modules/animals.py
|
jvicu2001/alexis-bot
|
6b10377e3ec61aaa6051220ae9684b2f8f73b36e
|
[
"MIT"
] | 8
|
2017-05-01T01:06:07.000Z
|
2017-09-16T10:34:19.000Z
|
import random
from bot import Command, utils, categories
url_settings = {
'cat': ['http://aws.random.cat/meow', ['gato', 'gatito', 'neko'], 'file'],
'dog': ['https://dog.ceo/api/breeds/image/random', ['perro', 'perrito', 'doggo'], 'message'],
'shiba': ['http://shibe.online/api/shibes', ['shibe', 'shibainu'], 0],
'fox': ['https://randomfox.ca/floof/', ['foxxo'], 'image'],
'duck': ['https://random-d.uk/api/random', ['pato'], 'url'],
'bunny': ['https://api.bunnies.io/v2/loop/random/?media=gif', ['conejo'], 'media.gif'],
'owl': ['http://pics.floofybot.moe/owl', ['buho'], 'image'],
}
alias_map = {k: v[1] + [k] for k, v in url_settings.items()}
aliases = [item for x in alias_map.values() for item in x]
| 37.046154
| 120
| 0.519934
|
import random
from bot import Command, utils, categories
url_settings = {
'cat': ['http://aws.random.cat/meow', ['gato', 'gatito', 'neko'], 'file'],
'dog': ['https://dog.ceo/api/breeds/image/random', ['perro', 'perrito', 'doggo'], 'message'],
'shiba': ['http://shibe.online/api/shibes', ['shibe', 'shibainu'], 0],
'fox': ['https://randomfox.ca/floof/', ['foxxo'], 'image'],
'duck': ['https://random-d.uk/api/random', ['pato'], 'url'],
'bunny': ['https://api.bunnies.io/v2/loop/random/?media=gif', ['conejo'], 'media.gif'],
'owl': ['http://pics.floofybot.moe/owl', ['buho'], 'image'],
}
alias_map = {k: v[1] + [k] for k, v in url_settings.items()}
aliases = [item for x in alias_map.values() for item in x]
class RandomAnimal(Command):
__author__ = 'makzk'
__version__ = '1.0.0'
def __init__(self, bot):
super().__init__(bot)
self.name = 'animal'
self.format = '$[animal-usage]'
self.aliases = aliases
self.category = categories.IMAGES
async def handle(self, cmd):
if cmd.argc < 1 and cmd.cmdname in aliases:
cmd.args = [cmd.cmdname]
cmd.argc = 1
if cmd.argc > 0 and cmd.args[0] == 'help':
return await cmd.send_usage('$[animal-usage-help]', locales={'types': ', '.join(list(url_settings.keys()))})
if cmd.argc < 1:
atype = random.choice(list(url_settings.keys()))
elif cmd.args[0] in aliases:
atype = list(url_settings.keys())[0]
for k, v in alias_map.items():
if cmd.args[0] in v:
atype = k
break
else:
return await cmd.send_usage()
try:
config = url_settings[atype]
await cmd.typing()
async with self.http.get(config[0]) as r:
if r.status == 200:
data = await r.json()
if isinstance(config[2], int):
data = data[config[2]]
else:
for prop in config[2].split('.'):
data = data.get(prop, '')
embed = utils.img_embed(data, f'$[animal-{atype}-title]')
return await cmd.answer(embed)
except Exception as e:
self.log.error(e)
await cmd.answer(f'$[animal-{atype}-error]')
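# Illustrative aside (not part of the original module): how handle() above interprets
# the third element of each url_settings entry -- an int indexes into a JSON list
# response, while a dotted string walks nested dict keys. The sample data is made up.
def _demo_extract(data, selector):
    if isinstance(selector, int):
        return data[selector]
    for prop in selector.split('.'):
        data = data.get(prop, '')
    return data

# _demo_extract(['https://example.test/shiba.jpg'], 0)                              -> list element
# _demo_extract({'media': {'gif': 'https://example.test/bunny.gif'}}, 'media.gif')  -> nested value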
| 1,535
| 112
| 23
|
e8adfc21f8912426bbba47effb9094816853c2f0
| 1,837
|
py
|
Python
|
website/server/codenames/generators/connector.py
|
mderijk/codenames
|
7133a8e85243550dddf4a64e90c9550f3b9e2cb4
|
[
"MIT"
] | 2
|
2021-06-10T20:53:06.000Z
|
2021-06-11T10:45:16.000Z
|
website/server/codenames/generators/connector.py
|
mderijk/codenames
|
7133a8e85243550dddf4a64e90c9550f3b9e2cb4
|
[
"MIT"
] | null | null | null |
website/server/codenames/generators/connector.py
|
mderijk/codenames
|
7133a8e85243550dddf4a64e90c9550f3b9e2cb4
|
[
"MIT"
] | 1
|
2021-07-26T07:05:38.000Z
|
2021-07-26T07:05:38.000Z
|
import multiprocessing.connection
import os
import socket
import sys
from .protocol import Protocol
# a small hack to make the listener support non-blocking and timeout mode
# Connector class
| 25.513889
| 100
| 0.722918
|
import multiprocessing.connection
import os
import socket
import sys
from .protocol import Protocol
# a small hack to make the listener support non-blocking and timeout mode
class Listener(multiprocessing.connection.Listener):
def accept(self, *args, timeout=None, **kwargs):
if timeout:
self._listener._socket.settimeout(float(timeout))
return super().accept(*args, **kwargs)
# Connector class
class Connector:
def __init__(self, socket, logs_directory, protocol=None):
self.socket = socket
self.logfile = os.path.join(logs_directory, 'connector.log')
if protocol is None:
protocol = Protocol()
self.protocol = protocol
# create log directory
logdir = os.path.dirname(self.logfile)
os.makedirs(logdir, exist_ok=True)
def __enter__(self):
# in this context all errors are written to a log file
self._sys_stderr = sys.stderr
sys.stderr = open(self.logfile, 'a')
# establish communications
self.listener = Listener(self.socket)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.listener.close()
if exc_type is None and exc_value is None and traceback is None: # if there were no errors, reinstate the old stderr
sys.stderr.close()
sys.stderr = self._sys_stderr
def send(self, data):
self.connection.send(data)
self.connection.close()
def error(self, *errors):
response = self.protocol.error(*errors)
self.send(response)
def receive(self, timeout=None):
while True:
try:
self.connection = self.listener.accept(timeout=timeout)
except socket.timeout:
return None
response = self.connection.recv()
errors = []
for attribute in self.protocol.required:
if attribute not in response:
errors.append(self.protocol.required[attribute])
if not errors:
break
self.error(*errors)
return response
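# Minimal usage sketch (not part of the original module) for the timeout-enabled
# Listener above: poll for a client connection without blocking forever. The address
# and timeout are arbitrary example values.
if __name__ == '__main__':
    listener = Listener(('localhost', 6000))
    try:
        conn = listener.accept(timeout=0.5)  # returns once a client connects...
        print('client connected:', conn)
    except socket.timeout:                   # ...or raises socket.timeout after 0.5s
        print('no client within 0.5 seconds')
    finally:
        listener.close()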
| 1,386
| 26
| 215
|
00735001c2f39a08534459a63e1b76e4dfd190da
| 904
|
py
|
Python
|
generator/substitutor.py
|
rddunphy/pwg
|
47ed13d3a8120e2c21e4ff28af08deeddbbb9d66
|
[
"MIT"
] | null | null | null |
generator/substitutor.py
|
rddunphy/pwg
|
47ed13d3a8120e2c21e4ff28af08deeddbbb9d66
|
[
"MIT"
] | null | null | null |
generator/substitutor.py
|
rddunphy/pwg
|
47ed13d3a8120e2c21e4ff28af08deeddbbb9d66
|
[
"MIT"
] | null | null | null |
import random
| 26.588235
| 65
| 0.639381
|
import random
def _toggle_case(ch):
if ch == ch.lower():
return ch.upper()
return ch.lower()
def _substitute_char(ch, substitutions):
ch = ch.lower()
if ch in substitutions:
return random.choice(substitutions[ch])
else:
return _toggle_case(ch)
def substitute(string, config):
length = len(string)
n_subst = round(length * config.munge_subst_factor)
n_caps = round(length * config.munge_caps_factor)
subst_chars = random.sample(range(length), n_subst)
caps_pop = [x for x in range(length) if x not in subst_chars]
caps_chars = random.sample(caps_pop, n_caps)
result = ""
for i, ch in enumerate(string):
if i in subst_chars:
result += _substitute_char(ch, config.substitutions)
elif i in caps_chars:
result += _toggle_case(ch)
else:
result += ch
return result
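# Minimal usage sketch (not part of the original module). The config object is a
# made-up stand-in: munge_subst_factor / munge_caps_factor give the fraction of
# characters to substitute or case-toggle, and substitutions maps a lowercase
# character to its allowed replacements.
if __name__ == "__main__":
    class _DemoConfig:
        munge_subst_factor = 0.3
        munge_caps_factor = 0.2
        substitutions = {'a': ['4', '@'], 'e': ['3'], 'o': ['0'], 's': ['5', '$']}

    print(substitute("correcthorsebatterystaple", _DemoConfig()))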
| 818
| 0
| 69
|
43829f8e584c907fdf6af75c4560e9e3478ef2a0
| 10,836
|
py
|
Python
|
scripts/run_tests.py
|
JorisHerbots/aioquic
|
a3d95020078939d7ec7d5768e06db9fa53cbf287
|
[
"BSD-3-Clause"
] | 1
|
2022-01-08T03:07:40.000Z
|
2022-01-08T03:07:40.000Z
|
scripts/run_tests.py
|
JorisHerbots/aioquic
|
a3d95020078939d7ec7d5768e06db9fa53cbf287
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/run_tests.py
|
JorisHerbots/aioquic
|
a3d95020078939d7ec7d5768e06db9fa53cbf287
|
[
"BSD-3-Clause"
] | 1
|
2020-04-30T21:00:00.000Z
|
2020-04-30T21:00:00.000Z
|
import subprocess
# need to run setup.py first to make sure all our changes are compiled before running
# if you didn't make changes to aioquic, you can comment this step out
# need to run this from inside the root dir
# so do python3 scripts/run_tests.py
print("Compiling...")
process = subprocess.run("{}".format("python3 /srv/aioquic/setup.py install"), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
if process.returncode != 0:
print ("ERROR in compilation: ", process.returncode, " != 0?")
print ( process.stderr )
print("Compilation done!")
basecommand = "python3 /srv/aioquic/examples/http3_client.py --insecure -v"
runname = ""
proper_endpoints = [
# Endpoint("https://neqo:4123/{}", "neqo"),
Endpoint("https://h3.stammw.eu:4433/{}", "quinn"),
Endpoint("https://test.privateoctopus.com:4433/{}", "picoquicFIFO"),
Endpoint("https://quic.aiortc.org/{}", "aioquic"),
Endpoint("https://http3-test.litespeedtech.com:4433/{}", "lsquic"),
Endpoint("https://fb.mvfst.net:443/{}", "mvfst"),
Endpoint("https://nghttp2.org:4433/{}", "ngtcp2"),
Endpoint("https://quic.examp1e.net/{}", "quicly"),
Endpoint("https://quic.rocks:4433/{}", "google"),
]
f5 = "https://f5quic.com:4433" # only has 50000, 5000000, 10000000 (50KB, 5MB , 10MB)
msquic = "https://quic.westus.cloudapp.azure.com" # only has 5000000.txt, 10000000.txt, 1MBfile.txt (1MB, 5MB, 10MB)
quiche = "https://quic.tech:8443" # only has 1MB.png, 5MB.png
quiche_nginx = "https://cloudflare-quic.com" # only has 1MB.png, 5MB.png
facebook = "https://www.facebook.com" # "rsrc.php/v3iXG34/y_/l/en_GB/ppT9gy-P_lf.js?_nc_x=Ij3Wp8lg5Kz"
fbcdn = "https://scontent.xx.fbcdn.net" # only has /speedtest-1MB, /speedtest-5MB, /speedtest-10MB
fbcdn_india = "https://xx-fbcdn-shv-01-bom1.fbcdn.net" # only has /speedtest-1MB, /speedtest-5MB, /speedtest-10MB
ats = "https://quic.ogre.com:4433" # en/latest/admin-guide/files/records.config.en.html
akamai = "https://ietf.akaquic.com" # /10k, /100k, /1M
# run3, main results for paper, january 13th
# run_single(1_000_000, "1file_1MB_0ms")
# run_single_endpoint(quiche + "/1MB.png", "1file_1MB_0ms", "quiche") # sadly doesn't work in VM at home, port 8443 is blocked.
# run_single_endpoint(f5 + "/50000", "1file_50KB_0ms", "f5")
# run_single_endpoint(quiche_nginx + "/1MB.png", "1file_1MB_0ms", "quicheNginx")
# run_single_endpoint(msquic + "/1MBfile.txt", "1file_1MB_0ms", "msquic")
# run_single_endpoint(fbcdn + "/speedtest-1MB", "1file_1MB_0ms", "fbcdn")
# run_single_endpoint(fbcdn_india + "/speedtest-1MB", "1file_1MB_0ms", "fbcdnIndia")
# run_single_endpoint(facebook + "/rsrc.php/v3iXG34/y_/l/en_GB/ppT9gy-P_lf.js?_nc_x=Ij3Wp8lg5Kz", "1file_400KB_0ms", "facebook")
# run_single_endpoint(ats + "/en/latest/admin-guide/files/records.config.en.html", "1file_400KB_0ms", "ats")
run_single_endpoint(akamai + "/1M", "1file_1MB_0ms", "akamai")
# run_single(5_000_000, "1file_5MB_0ms")
# run_single_endpoint(f5 + "/5000000", "1file_5MB_0ms", "f5")
# run_single_endpoint(quiche + "/5MB.png", "1file_5MB_0ms", "quiche")
# run_single_endpoint(quiche_nginx + "/5MB.png","1file_5MB_0ms", "quicheNginx")
# run_single_endpoint(msquic + "/5000000.txt", "1file_5MB_0ms", "msquic")
# run_single_endpoint(fbcdn + "/speedtest-5MB", "1file_5MB_0ms", "fbcdn")
# run_single_endpoint(fbcdn_india + "/speedtest-5MB", "1file_5MB_0ms", "fbcdnIndia")
# run_single(10_000_000, "1file_10MB_0ms")
# run_single_endpoint(f5 + "/10000000", "1file_10MB_0ms", "f5")
# run_single_endpoint(msquic + "/10000000.txt", "1file_10MB_0ms", "msquic")
# run_single_endpoint(fbcdn + "/speedtest-10MB", "1file_10MB_0ms", "fbcdn")
# run_single_endpoint(fbcdn_india + "/speedtest-10MB", "1file_10MB_0ms", "fbcdnIndia")
# for i in range(1,11):
# runname = str(i)
# run_parallel(1_000_000, 10, 0, "10files_1MB_0ms")
# run_parallel(5_000_000, 10, 0, "10files_5MB_0ms")
# run_parallel_endpoint(f5 + "/1000000", 10, 0, "10files_1MB_0ms", "f5")
# run_parallel_endpoint(f5 + "/5000000", 5, 0, "5files_5MB_0ms", "f5")
# run_parallel(1_000_000, 10, 0, "10files_1MB_0ms")
# run_parallel_endpoint(quiche + "/1MB.png", 10, 0, "10files_1MB_0ms", "quiche")
# run_parallel_endpoint(quiche_nginx + "/1MB.png", 10, 0, "10files_1MB_0ms", "quicheNginx")
# run_parallel_endpoint(msquic + "/1MBfile.txt", 10, 0, "10files_1MB_0ms", "msquic")
# run_parallel_endpoint(fbcdn + "/speedtest-1MB", 10, 0, "10files_1MB_0ms", "fbcdn")
# run_parallel_endpoint(facebook + "/rsrc.php/v3iXG34/y_/l/en_GB/ppT9gy-P_lf.js?_nc_x=Ij3Wp8lg5Kz", 10, 0, "10files_400KB_0ms", "facebook")
# run_parallel_endpoint(ats + "/en/latest/admin-guide/files/records.config.en.html", 10, 0, "10files_400KB_0ms", "ats")
# run_parallel_endpoint(fbcdn_india + "/speedtest-1MB",10, 0, "10files_1MB_0ms", "fbcdnIndia")
# run_parallel(5_000_000, 5, 0, "5files_5MB_0ms")
# run_parallel_endpoint(quiche + "/5MB.png", 5, 0, "5files_5MB_0ms", "quiche")
# run_parallel_endpoint(quiche_nginx + "/5MB.png", 5, 0, "5files_5MB_0ms", "quicheNginx")
# run_parallel_endpoint(msquic + "/5000000.txt", 5, 0, "5files_5MB_0ms", "msquic")
# run_parallel_endpoint(fbcdn + "/speedtest-5MB", 5, 0, "5files_5MB_0ms", "fbcdn")
# run_parallel_endpoint(fbcdn_india + "/speedtest-5MB",5, 0, "5files_5MB_0ms", "fbcdnIndia")
# run_parallel_endpoint(msquic + "/10000000.txt", 5, 0, "5files_10MB_0ms", "msquic")
# for these, first change MAX_DATA_WINDOW_STREAM in connection.py to 250KiB!!
# run_parallel_endpoint("https://fb.mvfst.net:4433" + "/1000000", 10, 0, "10files_1MB_0ms_flowControlFix", "mvfst")
# run_parallel_endpoint(fbcdn + "/speedtest-1MB", 10, 0, "10files_1MB_0ms_flowControlFix", "fbcdn")
# run_parallel_endpoint(fbcdn_india + "/speedtest-1MB", 10, 0, "10files_1MB_0ms_flowControlFix", "fbcdnIndia")
# run_parallel_endpoint(facebook + "/rsrc.php/v3iXG34/y_/l/en_GB/ppT9gy-P_lf.js?_nc_x=Ij3Wp8lg5Kz", 10, 0, "10files_1MB_0ms_flowControlFix", "facebook")
# for these, first change MAX_DATA_WINDOW_STREAM in connection.py to 250KiB!! and MAX_DATA_WINDOW to 10MiB
# run_parallel_endpoint("https://fb.mvfst.net:4433" + "/1000000", 10, 0, "10files_1MB_0ms_flowControlFix2", "mvfst")
# run_parallel_endpoint(fbcdn + "/speedtest-1MB", 10, 0, "10files_1MB_0ms_flowControlFix2", "fbcdn")
# run_parallel_endpoint(fbcdn_india + "/speedtest-1MB", 10, 0, "10files_1MB_0ms_flowControlFix2", "fbcdnIndia")
# run_parallel_endpoint(facebook + "/rsrc.php/v3iXG34/y_/l/en_GB/ppT9gy-P_lf.js?_nc_x=Ij3Wp8lg5Kz", 10, 0, "10files_1MB_0ms_flowControlFix2", "facebook")
# runname = "TEST1"
# run_single(5000, "1file_5000B_0ms")
# debugging
# run_single(5000, "1file_5000B_0ms")
# run_single(1_000_000, "1file_1MB_0ms")
# run_single(5_000_000, "1file_5MB_0ms")
# run_single_fbcdn("https://scontent.xx.fbcdn.net/speedtest-10MB", "1file_10MB_0ms")
# run_single_fbcdn("https://scontent.xx.fbcdn.net/speedtest-100MB", "1file_100MB_0ms")
# run_single_fbcdn("https://xx-fbcdn-shv-01-bom1.fbcdn.net/speedtest-10MB", "1file_10MB_0ms", "fbcdn-india")
# run_single_fbcdn("https://scontent-bru2-1.xx.fbcdn.net/v/t31.0-8/11333999_10206053543551446_142142577509361396_o.jpg?_nc_cat=105&_nc_ohc=Ydfgv65b-1wAQlHich3zGFlggP_28Kct-L9A4ks99FSLaEK7oLNPMiFtQ&_nc_ht=scontent-bru2-1.xx&oh=11dbe11236cf4df32e3f3518d2f91a16&oe=5E7E764A",
# "1file_400KB_0ms")
# probe for default buffer size (min packet size is 1280, so work in increments of that)
# run_parallel(1200, 10, 0, "10files_1200B_0ms") # 1 packet
# run_parallel(2400, 10, 0, "10files_2400B_0ms") # 2 packets
# run_parallel(3600, 10, 0, "10files_3600B_0ms") # 3 packets
# run_parallel(7200, 10, 0, "10files_7200B_0ms") # 6 packets
# run_parallel(12000, 10, 0, "10files_12KB_0ms") # 10 packets
# initial tests: 10x xMB, see global multiplexing behaviour appear
# run_parallel(1_000_000, 10, 0, "10files_1MB_0ms")
# run_parallel(5_000_000, 10, 0, "10files_5MB_0ms")
# run_parallel(10_000_000, 10, 0, "10files_10MB_0ms")
# 2nd tests: slight delay between files, see how that affects things (when does e.g., RR kick in)
# run_parallel(1_000_000, 10, 0.1, "10files_1MB_100ms")
# run_parallel(1_000_000, 10, 0.5, "10files_1MB_500ms")
# run_parallel(1_000_000, 10, 1, "10files_1MB_1000ms")
| 55.85567
| 272
| 0.66842
|
import subprocess
# need to run setup.py first to make sure all our changes are compiled before running
# if you didn't make changes to aioquic, you can comment this step out
# need to run this from inside the root dir
# so do python3 scripts/run_tests.py
print("Compiling...")
process = subprocess.run("{}".format("python3 /srv/aioquic/setup.py install"), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
if process.returncode != 0:
print ("ERROR in compilation: ", process.returncode, " != 0?")
print ( process.stderr )
print("Compilation done!")
basecommand = "python3 /srv/aioquic/examples/http3_client.py --insecure -v"
class Endpoint:
def __init__(self, url, name):
self.url = url
self.name = name
runname = ""
proper_endpoints = [
# Endpoint("https://neqo:4123/{}", "neqo"),
Endpoint("https://h3.stammw.eu:4433/{}", "quinn"),
Endpoint("https://test.privateoctopus.com:4433/{}", "picoquicFIFO"),
Endpoint("https://quic.aiortc.org/{}", "aioquic"),
Endpoint("https://http3-test.litespeedtech.com:4433/{}", "lsquic"),
Endpoint("https://fb.mvfst.net:443/{}", "mvfst"),
Endpoint("https://nghttp2.org:4433/{}", "ngtcp2"),
Endpoint("https://quic.examp1e.net/{}", "quicly"),
Endpoint("https://quic.rocks:4433/{}", "google"),
]
f5 = "https://f5quic.com:4433" # only has 50000, 5000000, 10000000 (50KB, 5MB , 10MB)
msquic = "https://quic.westus.cloudapp.azure.com" # only has 5000000.txt, 10000000.txt, 1MBfile.txt (1MB, 5MB, 10MB)
quiche = "https://quic.tech:8443" # only has 1MB.png, 5MB.png
quiche_nginx = "https://cloudflare-quic.com" # only has 1MB.png, 5MB.png
facebook = "https://www.facebook.com" # "rsrc.php/v3iXG34/y_/l/en_GB/ppT9gy-P_lf.js?_nc_x=Ij3Wp8lg5Kz"
fbcdn = "https://scontent.xx.fbcdn.net" # only has /speedtest-1MB, /speedtest-5MB, /speedtest-10MB
fbcdn_india = "https://xx-fbcdn-shv-01-bom1.fbcdn.net" # only has /speedtest-1MB, /speedtest-5MB, /speedtest-10MB
ats = "https://quic.ogre.com:4433" # en/latest/admin-guide/files/records.config.en.html
akamai = "https://ietf.akaquic.com" # /10k, /100k, /1M
def run_single(size, testname):
for endpoint in proper_endpoints:
url = endpoint.url.format(str(size))
cmd = basecommand + " " + "--quic-log /srv/aioquic/qlog/run"+ runname +"_single_" + testname + "_" + endpoint.name + ".qlog " + url
print ("Executing ", cmd)
run_command ( cmd )
def run_single_endpoint(url, testname, endpointName):
cmd = basecommand + " " + "--quic-log /srv/aioquic/qlog/run"+ runname +"_single_" + testname + "_" + endpointName + ".qlog \"" + url + "\""
print ("Executing ", cmd)
run_command ( cmd )
def run_parallel(size, amount, delay, testname):
for endpoint in proper_endpoints:
url = endpoint.url.format(str(size))
delaystr = ""
if delay > 0:
delaystr = " --delay-parallel " + str(delay) + " " # delay is in SECONDS
cmd = basecommand + " " + "--parallel " + str(amount) + delaystr + " --quic-log /srv/aioquic/qlog/run"+ runname +"_parallel_" + testname + "_" + endpoint.name + ".qlog " + url
print ("Executing ", cmd)
run_command ( cmd )
def run_parallel_endpoint(url, amount, delay, testname, endpointName):
delaystr = ""
if delay > 0:
delaystr = " --delay-parallel " + str(delay) + " " # delay is in SECONDS
cmd = basecommand + " " + "--parallel " + str(amount) + delaystr + " --quic-log /srv/aioquic/qlog/run"+ runname +"_parallel_" + testname + "_" + endpointName + ".qlog \"" + url + "\""
print ("Executing ", cmd)
run_command ( cmd )
def run_command(cmd):
process = subprocess.run("{}".format(cmd), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
if ( len(process.stdout) > 0 ):
print ( process.stdout )
if len(process.stderr) != 0 or process.returncode != 0:
# print ("Potential ERROR in process: ", process.returncode, " != 0?")
print ( process.stderr )
# run3, main results for paper, january 13th
# run_single(1_000_000, "1file_1MB_0ms")
# run_single_endpoint(quiche + "/1MB.png", "1file_1MB_0ms", "quiche") # sadly doesn't work in VM at home, port 8443 is blocked.
# run_single_endpoint(f5 + "/50000", "1file_50KB_0ms", "f5")
# run_single_endpoint(quiche_nginx + "/1MB.png", "1file_1MB_0ms", "quicheNginx")
# run_single_endpoint(msquic + "/1MBfile.txt", "1file_1MB_0ms", "msquic")
# run_single_endpoint(fbcdn + "/speedtest-1MB", "1file_1MB_0ms", "fbcdn")
# run_single_endpoint(fbcdn_india + "/speedtest-1MB", "1file_1MB_0ms", "fbcdnIndia")
# run_single_endpoint(facebook + "/rsrc.php/v3iXG34/y_/l/en_GB/ppT9gy-P_lf.js?_nc_x=Ij3Wp8lg5Kz", "1file_400KB_0ms", "facebook")
# run_single_endpoint(ats + "/en/latest/admin-guide/files/records.config.en.html", "1file_400KB_0ms", "ats")
run_single_endpoint(akamai + "/1M", "1file_1MB_0ms", "akamai")
# run_single(5_000_000, "1file_5MB_0ms")
# run_single_endpoint(f5 + "/5000000", "1file_5MB_0ms", "f5")
# run_single_endpoint(quiche + "/5MB.png", "1file_5MB_0ms", "quiche")
# run_single_endpoint(quiche_nginx + "/5MB.png","1file_5MB_0ms", "quicheNginx")
# run_single_endpoint(msquic + "/5000000.txt", "1file_5MB_0ms", "msquic")
# run_single_endpoint(fbcdn + "/speedtest-5MB", "1file_5MB_0ms", "fbcdn")
# run_single_endpoint(fbcdn_india + "/speedtest-5MB", "1file_5MB_0ms", "fbcdnIndia")
# run_single(10_000_000, "1file_10MB_0ms")
# run_single_endpoint(f5 + "/10000000", "1file_10MB_0ms", "f5")
# run_single_endpoint(msquic + "/10000000.txt", "1file_10MB_0ms", "msquic")
# run_single_endpoint(fbcdn + "/speedtest-10MB", "1file_10MB_0ms", "fbcdn")
# run_single_endpoint(fbcdn_india + "/speedtest-10MB", "1file_10MB_0ms", "fbcdnIndia")
# for i in range(1,11):
# runname = str(i)
# run_parallel(1_000_000, 10, 0, "10files_1MB_0ms")
# run_parallel(5_000_000, 10, 0, "10files_5MB_0ms")
# run_parallel_endpoint(f5 + "/1000000", 10, 0, "10files_1MB_0ms", "f5")
# run_parallel_endpoint(f5 + "/5000000", 5, 0, "5files_5MB_0ms", "f5")
# run_parallel(1_000_000, 10, 0, "10files_1MB_0ms")
# run_parallel_endpoint(quiche + "/1MB.png", 10, 0, "10files_1MB_0ms", "quiche")
# run_parallel_endpoint(quiche_nginx + "/1MB.png", 10, 0, "10files_1MB_0ms", "quicheNginx")
# run_parallel_endpoint(msquic + "/1MBfile.txt", 10, 0, "10files_1MB_0ms", "msquic")
# run_parallel_endpoint(fbcdn + "/speedtest-1MB", 10, 0, "10files_1MB_0ms", "fbcdn")
# run_parallel_endpoint(facebook + "/rsrc.php/v3iXG34/y_/l/en_GB/ppT9gy-P_lf.js?_nc_x=Ij3Wp8lg5Kz", 10, 0, "10files_400KB_0ms", "facebook")
# run_parallel_endpoint(ats + "/en/latest/admin-guide/files/records.config.en.html", 10, 0, "10files_400KB_0ms", "ats")
# run_parallel_endpoint(fbcdn_india + "/speedtest-1MB",10, 0, "10files_1MB_0ms", "fbcdnIndia")
# run_parallel(5_000_000, 5, 0, "5files_5MB_0ms")
# run_parallel_endpoint(quiche + "/5MB.png", 5, 0, "5files_5MB_0ms", "quiche")
# run_parallel_endpoint(quiche_nginx + "/5MB.png", 5, 0, "5files_5MB_0ms", "quicheNginx")
# run_parallel_endpoint(msquic + "/5000000.txt", 5, 0, "5files_5MB_0ms", "msquic")
# run_parallel_endpoint(fbcdn + "/speedtest-5MB", 5, 0, "5files_5MB_0ms", "fbcdn")
# run_parallel_endpoint(fbcdn_india + "/speedtest-5MB",5, 0, "5files_5MB_0ms", "fbcdnIndia")
# run_parallel_endpoint(msquic + "/10000000.txt", 5, 0, "5files_10MB_0ms", "msquic")
# for these, first change MAX_DATA_WINDOW_STREAM in connection.py to 250KiB!!
# run_parallel_endpoint("https://fb.mvfst.net:4433" + "/1000000", 10, 0, "10files_1MB_0ms_flowControlFix", "mvfst")
# run_parallel_endpoint(fbcdn + "/speedtest-1MB", 10, 0, "10files_1MB_0ms_flowControlFix", "fbcdn")
# run_parallel_endpoint(fbcdn_india + "/speedtest-1MB", 10, 0, "10files_1MB_0ms_flowControlFix", "fbcdnIndia")
# run_parallel_endpoint(facebook + "/rsrc.php/v3iXG34/y_/l/en_GB/ppT9gy-P_lf.js?_nc_x=Ij3Wp8lg5Kz", 10, 0, "10files_1MB_0ms_flowControlFix", "facebook")
# for these, first change MAX_DATA_WINDOW_STREAM in connection.py to 250KiB!! and MAX_DATA_WINDOW to 10MiB
# run_parallel_endpoint("https://fb.mvfst.net:4433" + "/1000000", 10, 0, "10files_1MB_0ms_flowControlFix2", "mvfst")
# run_parallel_endpoint(fbcdn + "/speedtest-1MB", 10, 0, "10files_1MB_0ms_flowControlFix2", "fbcdn")
# run_parallel_endpoint(fbcdn_india + "/speedtest-1MB", 10, 0, "10files_1MB_0ms_flowControlFix2", "fbcdnIndia")
# run_parallel_endpoint(facebook + "/rsrc.php/v3iXG34/y_/l/en_GB/ppT9gy-P_lf.js?_nc_x=Ij3Wp8lg5Kz", 10, 0, "10files_1MB_0ms_flowControlFix2", "facebook")
# runname = "TEST1"
# run_single(5000, "1file_5000B_0ms")
# debugging
# run_single(5000, "1file_5000B_0ms")
# run_single(1_000_000, "1file_1MB_0ms")
# run_single(5_000_000, "1file_5MB_0ms")
# run_single_fbcdn("https://scontent.xx.fbcdn.net/speedtest-10MB", "1file_10MB_0ms")
# run_single_fbcdn("https://scontent.xx.fbcdn.net/speedtest-100MB", "1file_100MB_0ms")
# run_single_fbcdn("https://xx-fbcdn-shv-01-bom1.fbcdn.net/speedtest-10MB", "1file_10MB_0ms", "fbcdn-india")
# run_single_fbcdn("https://scontent-bru2-1.xx.fbcdn.net/v/t31.0-8/11333999_10206053543551446_142142577509361396_o.jpg?_nc_cat=105&_nc_ohc=Ydfgv65b-1wAQlHich3zGFlggP_28Kct-L9A4ks99FSLaEK7oLNPMiFtQ&_nc_ht=scontent-bru2-1.xx&oh=11dbe11236cf4df32e3f3518d2f91a16&oe=5E7E764A",
# "1file_400KB_0ms")
# probe for default buffer size (min packet size is 1280, so work in increments of that)
# run_parallel(1200, 10, 0, "10files_1200B_0ms") # 1 packet
# run_parallel(2400, 10, 0, "10files_2400B_0ms") # 2 packets
# run_parallel(3600, 10, 0, "10files_3600B_0ms") # 3 packets
# run_parallel(7200, 10, 0, "10files_7200B_0ms") # 6 packets
# run_parallel(12000, 10, 0, "10files_12KB_0ms") # 10 packets
# initial tests: 10x xMB, see global multiplexing behaviour appear
# run_parallel(1_000_000, 10, 0, "10files_1MB_0ms")
# run_parallel(5_000_000, 10, 0, "10files_5MB_0ms")
# run_parallel(10_000_000, 10, 0, "10files_10MB_0ms")
# 2nd tests: slight delay between files, see how that affects things (when does e.g., RR kick in)
# run_parallel(1_000_000, 10, 0.1, "10files_1MB_100ms")
# run_parallel(1_000_000, 10, 0.5, "10files_1MB_500ms")
# run_parallel(1_000_000, 10, 1, "10files_1MB_1000ms")
| 1,862
| -6
| 164
|
078c266bfb0d03456b4eacc54cbb665b4eebd7ca
| 232
|
py
|
Python
|
homeassistant/components/advantage_air/const.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 1
|
2020-12-17T19:05:31.000Z
|
2020-12-17T19:05:31.000Z
|
homeassistant/components/advantage_air/const.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 56
|
2020-08-03T07:30:54.000Z
|
2022-03-31T06:02:04.000Z
|
homeassistant/components/advantage_air/const.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 1
|
2021-04-30T01:57:13.000Z
|
2021-04-30T01:57:13.000Z
|
"""Constants used by Advantage Air integration."""
DOMAIN = "advantage_air"
ADVANTAGE_AIR_RETRY = 5
ADVANTAGE_AIR_STATE_OPEN = "open"
ADVANTAGE_AIR_STATE_CLOSE = "close"
ADVANTAGE_AIR_STATE_ON = "on"
ADVANTAGE_AIR_STATE_OFF = "off"
| 29
| 50
| 0.797414
|
"""Constants used by Advantage Air integration."""
DOMAIN = "advantage_air"
ADVANTAGE_AIR_RETRY = 5
ADVANTAGE_AIR_STATE_OPEN = "open"
ADVANTAGE_AIR_STATE_CLOSE = "close"
ADVANTAGE_AIR_STATE_ON = "on"
ADVANTAGE_AIR_STATE_OFF = "off"
| 0
| 0
| 0
|
0f9c09f8348b4f07928bc6ec571811ec5e2db857
| 172
|
py
|
Python
|
03 Bit Manipulation/3.7 Longest consecutive one.py
|
MrNevil/GeeksForGeeks-DSA-2
|
cb53912a2bfea5e04ce1b2c10e103a61ae1d903b
|
[
"MIT"
] | 1
|
2021-04-03T06:23:42.000Z
|
2021-04-03T06:23:42.000Z
|
03 Bit Manipulation/3.7 Longest consecutive one.py
|
MrNevil/GeeksForGeeks-DSA-2
|
cb53912a2bfea5e04ce1b2c10e103a61ae1d903b
|
[
"MIT"
] | null | null | null |
03 Bit Manipulation/3.7 Longest consecutive one.py
|
MrNevil/GeeksForGeeks-DSA-2
|
cb53912a2bfea5e04ce1b2c10e103a61ae1d903b
|
[
"MIT"
] | 2
|
2021-03-08T21:13:22.000Z
|
2021-06-16T12:32:05.000Z
|
print(maxConsecutiveOnes(14)) # Output = 3 as binary(14) = 1110
| 19.111111
| 65
| 0.552326
|
def maxConsecutiveOnes(x):
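# Repeatedly AND x with itself shifted right by one bit: each pass shortens
# every run of consecutive 1-bits by one, so the number of passes needed to
# reach zero equals the length of the longest run.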
cnt = 0
while x:
x = x&(x>>1)
cnt += 1
return cnt
print(maxConsecutiveOnes(14)) # Output = 3 as binary(14) = 1110
| 83
| 0
| 22
|
3189556b27e7fd6f0a8a9a78cb41b4b0730acf5f
| 204
|
py
|
Python
|
Ranmath/MatrixNormalizers/AbstractNormalizer.py
|
pawel-ta/ranmath
|
f52a15b10bdb5830a50c43da11fed5f182026587
|
[
"MIT"
] | null | null | null |
Ranmath/MatrixNormalizers/AbstractNormalizer.py
|
pawel-ta/ranmath
|
f52a15b10bdb5830a50c43da11fed5f182026587
|
[
"MIT"
] | null | null | null |
Ranmath/MatrixNormalizers/AbstractNormalizer.py
|
pawel-ta/ranmath
|
f52a15b10bdb5830a50c43da11fed5f182026587
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
| 15.692308
| 47
| 0.666667
|
from abc import ABC, abstractmethod
class AbstractNormalizer(ABC):
def __init__(self):
super().__init__()
@abstractmethod
def normalize(self, matrix, verbose=False):
pass
| 60
| 83
| 23
|
ea2b01aea886d2f37de71471f383795fb6766619
| 1,888
|
py
|
Python
|
WeatherDash/Forecast_IO_py/forecastiopy/FIOMinutely.py
|
Rcarballo2222/ENGI-301
|
341f7e76ec842e1e1f449b2206633150abab1b31
|
[
"MIT"
] | null | null | null |
WeatherDash/Forecast_IO_py/forecastiopy/FIOMinutely.py
|
Rcarballo2222/ENGI-301
|
341f7e76ec842e1e1f449b2206633150abab1b31
|
[
"MIT"
] | null | null | null |
WeatherDash/Forecast_IO_py/forecastiopy/FIOMinutely.py
|
Rcarballo2222/ENGI-301
|
341f7e76ec842e1e1f449b2206633150abab1b31
|
[
"MIT"
] | 1
|
2019-04-17T19:27:56.000Z
|
2019-04-17T19:27:56.000Z
|
# -*- coding: utf-8 -*-
"""
This module receives a ForecastIO object and holds the minutely weather
conditions. It has one class for this purpose.
"""
class FIOMinutely(object):
"""
This class receives a ForecastIO object and holds the minutely weather
conditions. It has one class for this purpose.
"""
minutely = None
def __init__(self, forecast_io):
"""
Receives a ForecastIO object and gets the minutely weather conditions
if they are available in the object.
"""
if forecast_io.has_minutely():
self.minutely = forecast_io.get_minutely()
for item in forecast_io.get_minutely().keys():
setattr(self, item, forecast_io.get_minutely()[item])
for minute in range(0, self.minutes()):
for item in self.get_minute(minute).keys():
setattr(self, 'minute_'+str(minute+1)+'_'+item, \
self.get_minute(minute)[item])
def get(self, minute=None):
"""
Returns a dictionary with minutely weather conditions.
Returns None if none are available.
A minute can be passed as an argument; if so, the function will call get_minute()
to return that minute.
See get_minute() for details.
"""
if minute is None:
return self.minutely
else:
return self.get_minute(minute)
def get_minute(self, minute):
"""
Receives a minute as an argument and returns the prediction for that
minute if it is available. If not, the function will return None.
"""
if minute > self.minutes():
return None
else:
return self.get()['data'][minute-1]
def minutes(self):
"""
Returns how many minutes of prediction are available
"""
return len(self.get()['data'])
| 33.122807
| 81
| 0.597987
|
# -*- coding: utf-8 -*-
"""
This module receives a ForecastIO object and holds the minutely weather
conditions. It has one class for this purpose.
"""
class FIOMinutely(object):
"""
This class receives a ForecastIO object and holds the minutely weather
conditions. It has one class for this purpose.
"""
minutely = None
def __init__(self, forecast_io):
"""
Receives a ForecastIO object and gets the minutely weather conditions
if they are available in the object.
"""
if forecast_io.has_minutely():
self.minutely = forecast_io.get_minutely()
for item in forecast_io.get_minutely().keys():
setattr(self, item, forecast_io.get_minutely()[item])
for minute in range(0, self.minutes()):
for item in self.get_minute(minute).keys():
setattr(self, 'minute_'+str(minute+1)+'_'+item, \
self.get_minute(minute)[item])
def get(self, minute=None):
"""
Returns a dictionary with minutely weather conditions.
Returns None if none are available.
A minute can be passed as an argument; if so, the function will call get_minute()
to return that minute.
See get_minute() for details.
"""
if minute is None:
return self.minutely
else:
return self.get_minute(minute)
def get_minute(self, minute):
"""
Receives a minute as an argument and returns the prediction for that
minute if it is available. If not, the function will return None.
"""
if minute > self.minutes():
return None
else:
return self.get()['data'][minute-1]
def minutes(self):
"""
Returns how many minutes of prediction are available
"""
return len(self.get()['data'])
| 0
| 0
| 0
|
03bd722265b3aa46494aea9bb9451e7c637cfa8d
| 2,100
|
py
|
Python
|
backup.py
|
lucapericlp/kohonen
|
c6e84cc95b0468e49d258d3e40843b8090dcd3a0
|
[
"MIT"
] | 1
|
2020-04-07T06:59:41.000Z
|
2020-04-07T06:59:41.000Z
|
backup.py
|
lucapericlp/kohonen
|
c6e84cc95b0468e49d258d3e40843b8090dcd3a0
|
[
"MIT"
] | null | null | null |
backup.py
|
lucapericlp/kohonen
|
c6e84cc95b0468e49d258d3e40843b8090dcd3a0
|
[
"MIT"
] | null | null | null |
import math
import random
import pandas as pd
from Neuron import Neuron
from Neuron import getNormalised
from Visualiser import Visualiser
if __name__ == '__main__':
main()
# if 4 neurons are used then one is left unused as a cluster, i.e. it is extra
# if 3 neurons are used, all of them are used
| 30.882353
| 99
| 0.74
|
import math
import random
import pandas as pd
from Neuron import Neuron
from Neuron import getNormalised
from Visualiser import Visualiser
class Network():
def __init__(self,numNeurons):
self.neurons = []
for i in range(numNeurons):
self.neurons.append(Neuron(weights=[random.uniform(0,1),
random.uniform(0,1),random.uniform(0,1)]))
def train(self,inputs,lr):
normalised_inputs = getNormalised(inputs)
posWithLargestScore = self.closestNeuron(normalised_inputs)
winningNeuron = self.neurons[posWithLargestScore]
winningNeuron.updateWeights(normalised_inputs,lr)
def predict(self,all_inputs):
clustered_dict = {index:[] for index,neuron in enumerate(self.neurons)} #initialise positions
inputColours = {0:'r',1:'b',2:'g'}
visualiser = Visualiser(size=111)
for index,neuron in enumerate(self.neurons):
visualiser.add(neuron.weights[0],neuron.weights[1],neuron.weights[2],'y','^')
for index,norm_input in all_inputs.iterrows():
winningNeuron = self.closestNeuron(getNormalised(norm_input))
visualiser.add(norm_input[0],norm_input[1],norm_input[2],inputColours[winningNeuron],'o')
clustered_dict[winningNeuron].append(norm_input)#[str(i) for i in norm_input]) use for debugging
visualiser.show()
return clustered_dict
def closestNeuron(self,normalised_inputs):
largestNum = 0
posWithLargestScore = 0
for pos,neuron in enumerate(self.neurons):
netScore = neuron.calcNet(normalised_inputs)
if netScore > largestNum:
largestNum = netScore
posWithLargestScore = pos
return posWithLargestScore
def __str__(self):
return "<Network w/ neurons:\n {}\n>".format(','.join([str(n) for n in self.neurons]))
def main():
network = Network(numNeurons=3)
lr = 0.1
epochs = 600
df = pd.read_csv('data.csv',header=None)
df.dropna(inplace=True)
for i in range(epochs):
for index,row in df.iterrows():
network.train(row,lr)
clustered_dict = network.predict(df)
print(network)
if __name__ == '__main__':
main()
# if 4 neurons are used then one is left unused as a cluster, i.e. it is extra
# if 3 neurons are used, all of them are used
| 1,658
| -5
| 166
|
3b2595ebe0280acb97edd20ead3f07705eb3012c
| 345
|
py
|
Python
|
genomicode/timer.py
|
jefftc/changlab
|
11da8c415afefcba0b0216238387c75aeb3a56ac
|
[
"MIT"
] | 9
|
2017-01-13T02:38:41.000Z
|
2021-04-08T00:44:39.000Z
|
genomicode/timer.py
|
jefftc/changlab
|
11da8c415afefcba0b0216238387c75aeb3a56ac
|
[
"MIT"
] | null | null | null |
genomicode/timer.py
|
jefftc/changlab
|
11da8c415afefcba0b0216238387c75aeb3a56ac
|
[
"MIT"
] | 4
|
2017-01-05T16:25:25.000Z
|
2019-12-12T20:07:38.000Z
|
"""
Functions:
wait
"""
TIMERS = {} # name -> time
| 15
| 56
| 0.556522
|
"""
Functions:
wait
"""
TIMERS = {} # name -> time
def wait(delay, name=None):
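# Ensure at least `delay` seconds elapse between successive calls sharing the
# same timer name, sleeping for the remaining time when called too soon.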
global TIMERS
import time
if delay is None:
delay = 2
if name is None:
name = "default"
how_long = TIMERS.get(name, 0) + delay - time.time()
if how_long > 0:
time.sleep(how_long)
TIMERS[name] = time.time()
| 266
| 0
| 23
|
d3000724ed087c837edb4323a518186e089d5ac4
| 1,117
|
py
|
Python
|
scripts/practice/FB-reRun/QueueUsingLinkedList.py
|
bhimeshchauhan/competitive_programming
|
e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5
|
[
"MIT"
] | null | null | null |
scripts/practice/FB-reRun/QueueUsingLinkedList.py
|
bhimeshchauhan/competitive_programming
|
e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5
|
[
"MIT"
] | 8
|
2020-09-05T16:04:31.000Z
|
2022-02-27T09:57:51.000Z
|
scripts/practice/FB-reRun/QueueUsingLinkedList.py
|
bhimeshchauhan/competitive_programming
|
e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5
|
[
"MIT"
] | null | null | null |
"""
Queue Using Linked List
"""
# Python3 program to demonstrate linked list
# based implementation of queue
# A linked list (LL) node
# to store a queue entry
# A class to represent a queue
# The queue, front stores the front node
# of LL and rear stores the last node of LL
# Method to add an item to the queue
# Method to remove an item from queue
# Driver Code
if __name__== '__main__':
q = Queue()
q.EnQueue(10)
q.EnQueue(20)
q.DeQueue()
q.DeQueue()
q.EnQueue(30)
q.EnQueue(40)
q.EnQueue(50)
q.DeQueue()
print("Queue Front " + str(q.front.data))
print("Queue Rear " + str(q.rear.data))
| 17.184615
| 44
| 0.664279
|
"""
Queue Using Linked List
"""
# Python3 program to demonstrate linked list
# based implementation of queue
# A linked list (LL) node
# to store a queue entry
class Node:
def __init__(self, data):
self.data = data
self.next = None
# A class to represent a queue
# The queue, front stores the front node
# of LL and rear stores the last node of LL
class Queue:
def __init__(self):
self.front = self.rear = None
def isEmpty(self):
return self.front == None
# Method to add an item to the queue
def EnQueue(self, item):
temp = Node(item)
if self.rear == None:
self.front = self.rear = temp
return
self.rear.next = temp
self.rear = temp
# Method to remove an item from queue
def DeQueue(self):
if self.isEmpty():
return
temp = self.front
self.front = temp.next
if(self.front == None):
self.rear = None
# Driver Code
if __name__== '__main__':
q = Queue()
q.EnQueue(10)
q.EnQueue(20)
q.DeQueue()
q.DeQueue()
q.EnQueue(30)
q.EnQueue(40)
q.EnQueue(50)
q.DeQueue()
print("Queue Front " + str(q.front.data))
print("Queue Rear " + str(q.rear.data))
| 356
| -19
| 164
|
2a3e82a35acbab969c1df2af10edbfb6bd0c9e8f
| 4,791
|
py
|
Python
|
python_modules/libraries/dagster-pandas/dagster_pandas/data_frame.py
|
pseudoPixels/dagster
|
ac78bdbec54754d35f51d706fc5b0bacfe49f2bf
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-pandas/dagster_pandas/data_frame.py
|
pseudoPixels/dagster
|
ac78bdbec54754d35f51d706fc5b0bacfe49f2bf
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-pandas/dagster_pandas/data_frame.py
|
pseudoPixels/dagster
|
ac78bdbec54754d35f51d706fc5b0bacfe49f2bf
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
from dagster_pandas.constraints import ConstraintViolationException
from dagster_pandas.validation import validate_collection_schema
from dagster import (
DagsterInvariantViolationError,
EventMetadataEntry,
Field,
Materialization,
Path,
String,
TypeCheck,
as_dagster_type,
check,
dagster_type,
)
from dagster.core.types.config.field_utils import NamedSelector
from dagster.core.types.runtime.config_schema import input_selector_schema, output_selector_schema
@output_selector_schema(
NamedSelector(
'DataFrameOutputSchema',
{
'csv': {'path': Path, 'sep': Field(String, is_optional=True, default_value=','),},
'parquet': {'path': Path},
'table': {'path': Path},
},
)
)
@input_selector_schema(
NamedSelector(
'DataFrameInputSchema',
{
'csv': {'path': Path, 'sep': Field(String, is_optional=True, default_value=','),},
'parquet': {'path': Path},
'table': {'path': Path},
},
)
)
DataFrame = as_dagster_type(
pd.DataFrame,
name='PandasDataFrame',
description='''Two-dimensional size-mutable, potentially heterogeneous
tabular data structure with labeled axes (rows and columns).
See http://pandas.pydata.org/''',
input_hydration_config=dataframe_input_schema,
output_materialization_config=dataframe_output_schema,
type_check=df_type_check,
)
| 33.978723
| 98
| 0.675433
|
import pandas as pd
from dagster_pandas.constraints import ConstraintViolationException
from dagster_pandas.validation import validate_collection_schema
from dagster import (
DagsterInvariantViolationError,
EventMetadataEntry,
Field,
Materialization,
Path,
String,
TypeCheck,
as_dagster_type,
check,
dagster_type,
)
from dagster.core.types.config.field_utils import NamedSelector
from dagster.core.types.runtime.config_schema import input_selector_schema, output_selector_schema
def dict_without_keys(ddict, *keys):
return {key: value for key, value in ddict.items() if key not in set(keys)}
@output_selector_schema(
NamedSelector(
'DataFrameOutputSchema',
{
'csv': {'path': Path, 'sep': Field(String, is_optional=True, default_value=','),},
'parquet': {'path': Path},
'table': {'path': Path},
},
)
)
def dataframe_output_schema(_context, file_type, file_options, pandas_df):
check.str_param(file_type, 'file_type')
check.dict_param(file_options, 'file_options')
check.inst_param(pandas_df, 'pandas_df', DataFrame)
if file_type == 'csv':
path = file_options['path']
pandas_df.to_csv(path, index=False, **dict_without_keys(file_options, 'path'))
elif file_type == 'parquet':
pandas_df.to_parquet(file_options['path'])
elif file_type == 'table':
pandas_df.to_csv(file_options['path'], sep='\t', index=False)
else:
check.failed('Unsupported file_type {file_type}'.format(file_type=file_type))
return Materialization.file(file_options['path'])
@input_selector_schema(
NamedSelector(
'DataFrameInputSchema',
{
'csv': {'path': Path, 'sep': Field(String, is_optional=True, default_value=','),},
'parquet': {'path': Path},
'table': {'path': Path},
},
)
)
def dataframe_input_schema(_context, file_type, file_options):
check.str_param(file_type, 'file_type')
check.dict_param(file_options, 'file_options')
if file_type == 'csv':
path = file_options['path']
return pd.read_csv(path, **dict_without_keys(file_options, 'path'))
elif file_type == 'parquet':
return pd.read_parquet(file_options['path'])
elif file_type == 'table':
return pd.read_csv(file_options['path'], sep='\t')
else:
raise DagsterInvariantViolationError(
'Unsupported file_type {file_type}'.format(file_type=file_type)
)
def df_type_check(value):
if not isinstance(value, pd.DataFrame):
return TypeCheck(success=False)
return TypeCheck(
success=True,
metadata_entries=[
EventMetadataEntry.text(str(len(value)), 'row_count', 'Number of rows in DataFrame'),
# string cast columns since they may be things like datetime
EventMetadataEntry.json({'columns': list(map(str, value.columns))}, 'metadata'),
],
)
DataFrame = as_dagster_type(
pd.DataFrame,
name='PandasDataFrame',
description='''Two-dimensional size-mutable, potentially heterogeneous
tabular data structure with labeled axes (rows and columns).
See http://pandas.pydata.org/''',
input_hydration_config=dataframe_input_schema,
output_materialization_config=dataframe_output_schema,
type_check=df_type_check,
)
def create_dagster_pandas_dataframe_type(
name=None, type_check=None, columns=None, summary_statistics=None
):
summary_statistics = check.opt_callable_param(summary_statistics, 'summary_statistics')
def _dagster_type_check(value):
event_metadata = []
if columns is not None:
try:
validate_collection_schema(columns, value)
except ConstraintViolationException as e:
return TypeCheck(success=False, description=str(e))
if type_check:
type_check_object = check.inst_param(
type_check(value), 'user_type_check_object', TypeCheck
)
if not type_check_object.success:
return type_check_object
event_metadata += type_check_object.metadata_entries
if summary_statistics:
metadata_entries = summary_statistics(value)
event_metadata += check.opt_list_param(
metadata_entries, 'metadata_entries', of_type=EventMetadataEntry
)
return TypeCheck(success=True, metadata_entries=event_metadata)
@dagster_type( # pylint: disable=W0223
name=name, type_check=_dagster_type_check,
)
class _DataFrameDagsterType(DataFrame):
pass
# Did this instead of as_dagster_type because multiple dataframe types can be created
return _DataFrameDagsterType
| 3,202
| 0
| 113
|
320c82319d7caa371edf71cad37b302372b728d6
| 321
|
py
|
Python
|
tools/boneck/boneck.py
|
6vasia/ii-base
|
a36ea24040c24f5c3159b022670d49ffd68de638
|
[
"BSD-3-Clause"
] | 1
|
2021-07-28T21:34:45.000Z
|
2021-07-28T21:34:45.000Z
|
tools/boneck/boneck.py
|
6vasia/ii-base
|
a36ea24040c24f5c3159b022670d49ffd68de638
|
[
"BSD-3-Clause"
] | null | null | null |
tools/boneck/boneck.py
|
6vasia/ii-base
|
a36ea24040c24f5c3159b022670d49ffd68de638
|
[
"BSD-3-Clause"
] | null | null | null |
cfg = open('config.user').read().splitlines()
bonecho = open('bone.echo').read().strip()
el = open('echo/%s' % bonecho).read().splitlines()
lastbone = open('msg/%s' % el[-1]).read().splitlines()[8:]
if lastbone:
open('bone.echo','w').write(lastbone[0])
open('config.cfg','w').write('\n'.join(cfg+lastbone+['']))
| 35.666667
| 62
| 0.613707
|
cfg = open('config.user').read().splitlines()
bonecho = open('bone.echo').read().strip()
el = open('echo/%s' % bonecho).read().splitlines()
lastbone = open('msg/%s' % el[-1]).read().splitlines()[8:]
if lastbone:
open('bone.echo','w').write(lastbone[0])
open('config.cfg','w').write('\n'.join(cfg+lastbone+['']))
| 0
| 0
| 0
|
f1343917f3d52976f0620a8993ab397c596d873e
| 321
|
py
|
Python
|
dialogos/quotes/urls.py
|
bertucho/epic-movie-quotes-quiz
|
09e4ec58a441ab74c1ce6e0fde4e71b08a4d7250
|
[
"MIT"
] | null | null | null |
dialogos/quotes/urls.py
|
bertucho/epic-movie-quotes-quiz
|
09e4ec58a441ab74c1ce6e0fde4e71b08a4d7250
|
[
"MIT"
] | null | null | null |
dialogos/quotes/urls.py
|
bertucho/epic-movie-quotes-quiz
|
09e4ec58a441ab74c1ce6e0fde4e71b08a4d7250
|
[
"MIT"
] | null | null | null |
from django.conf.urls import patterns, url
from quotes import views
from views import *
urlpatterns = patterns('',
url(r'^sdf$', index, name='index'),
url(r'^$', GameView.as_view(), name='game'),
url(r'^post$', AnswerView.as_view(), name='answer'),
url(r'^edit$', QuoteUpdate.as_view(), name='update'),
)
| 29.181818
| 55
| 0.65109
|
from django.conf.urls import patterns, url
from quotes import views
from views import *
urlpatterns = patterns('',
url(r'^sdf$', index, name='index'),
url(r'^$', GameView.as_view(), name='game'),
url(r'^post$', AnswerView.as_view(), name='answer'),
url(r'^edit$', QuoteUpdate.as_view(), name='update'),
)
| 0
| 0
| 0
|
865a4f01f0d87df969a23aa8aff7cef597e09010
| 1,315
|
py
|
Python
|
readthedocs/payments/utils.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 2,092
|
2019-06-29T07:47:30.000Z
|
2022-03-31T14:54:59.000Z
|
readthedocs/payments/utils.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 2,389
|
2019-06-29T04:22:55.000Z
|
2022-03-31T22:57:49.000Z
|
readthedocs/payments/utils.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 1,185
|
2019-06-29T21:49:31.000Z
|
2022-03-30T09:57:15.000Z
|
# -*- coding: utf-8 -*-
"""
Payment utility functions.
These are mostly one-off functions. Define the bulk of Stripe operations on
:py:class:`readthedocs.payments.forms.StripeResourceMixin`.
"""
import structlog
import stripe
from django.conf import settings
stripe.api_key = settings.STRIPE_SECRET
log = structlog.get_logger(__name__)
def delete_customer(customer_id):
"""Delete customer from Stripe, cancelling subscriptions."""
try:
customer = stripe.Customer.retrieve(customer_id)
return customer.delete()
except stripe.error.InvalidRequestError:
log.exception(
'Customer not deleted. Customer not found on Stripe.',
stripe_customer=customer_id,
)
def cancel_subscription(customer_id, subscription_id):
"""Cancel Stripe subscription, if it exists."""
try:
customer = stripe.Customer.retrieve(customer_id)
if hasattr(customer, 'subscriptions'):
subscription = customer.subscriptions.retrieve(subscription_id)
return subscription.delete()
except stripe.error.StripeError:
log.exception(
'Subscription not cancelled. Customer/Subscription not found on Stripe. ',
stripe_customer=customer_id,
stripe_subscription=subscription_id,
)
| 28.586957
| 86
| 0.696578
|
# -*- coding: utf-8 -*-
"""
Payment utility functions.
These are mostly one-off functions. Define the bulk of Stripe operations on
:py:class:`readthedocs.payments.forms.StripeResourceMixin`.
"""
import structlog
import stripe
from django.conf import settings
stripe.api_key = settings.STRIPE_SECRET
log = structlog.get_logger(__name__)
def delete_customer(customer_id):
"""Delete customer from Stripe, cancelling subscriptions."""
try:
customer = stripe.Customer.retrieve(customer_id)
return customer.delete()
except stripe.error.InvalidRequestError:
log.exception(
'Customer not deleted. Customer not found on Stripe.',
stripe_customer=customer_id,
)
def cancel_subscription(customer_id, subscription_id):
"""Cancel Stripe subscription, if it exists."""
try:
customer = stripe.Customer.retrieve(customer_id)
if hasattr(customer, 'subscriptions'):
subscription = customer.subscriptions.retrieve(subscription_id)
return subscription.delete()
except stripe.error.StripeError:
log.exception(
'Subscription not cancelled. Customer/Subscription not found on Stripe. ',
stripe_customer=customer_id,
stripe_subscription=subscription_id,
)
| 0
| 0
| 0
|
0f5142437d1335663663b0990f48013726117758
| 13,770
|
py
|
Python
|
firewall/__init__.py
|
dechainers/dechainy_plugin_firewall
|
53327f60880bdaf8a1d5ffe85662c46f7a8b44d4
|
[
"Apache-2.0"
] | null | null | null |
firewall/__init__.py
|
dechainers/dechainy_plugin_firewall
|
53327f60880bdaf8a1d5ffe85662c46f7a8b44d4
|
[
"Apache-2.0"
] | null | null | null |
firewall/__init__.py
|
dechainers/dechainy_plugin_firewall
|
53327f60880bdaf8a1d5ffe85662c46f7a8b44d4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 DeChainers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes as ct
from dataclasses import dataclass, field
from math import ceil
from typing import Callable, ClassVar, Dict, List
from dechainy.ebpf import BPF
from dechainy.exceptions import HookDisabledException
from dechainy.plugins import Probe
from dechainy.utility import (ipv4_to_network_int, port_to_network_int,
protocol_to_int)
@dataclass
class FirewallRule:
"""Class to represent a firewall iptable-like rule
Attributes:
src (str): The source address to match. Default None.
dst (str): The destination address to match. Default None.
sport (int): The source port to match. Default None.
dport (int): The destination port to match. Default None.
l4proto (str): The Layer 4 protocol to match. Default None.
tcpflags (str): A string containing the names of the TCP Flags to match. Default None.
"""
src: str = None
dst: str = None
sport: int = None
dport: int = None
l4proto: str = None
tcpflags: str = None
@dataclass
class Firewall(Probe):
"""Firewall class, an eBPF implementation of an iptables-like one."""
# Size of the eBPF maps, how many entries can they accept
_MAPS: ClassVar[List[str]] = ['IPV4_SRC', 'IPV4_DST', 'PORT_SRC',
'PORT_DST', 'IP_PROTO', 'TCP_FLAGS']
_ALL_MAPS: ClassVar[List[str]] = _MAPS + [f"{x}_WILDCARDS" for x in _MAPS]
_rule_ids_word_per_entry: ClassVar[int] = 10
_max_rules: int = 1000
__rules: Dict[str, List[FirewallRule]] = field(default_factory=dict)
def get_at(self, program_type: str, rule_id: int) -> FirewallRule:
"""Function to retrieve a rule at a specific position
Args:
program_type (str): The hook of interest (ingress/egress)
rule_id (int): The ID of the rule
Raises:
IndexError: The provided ID is greater than the actual number of rules
Returns:
FirewallRule: The retrieved rule
"""
self.__check_hook_active(program_type)
if rule_id >= len(self.__rules[program_type]):
raise IndexError("The Rule ID provided is wrong")
return self.__rules[program_type][rule_id]
def get(self, program_type: str) -> str:
"""Function to retrieve all the rules for a specific hook
Args:
program_type (str): The hook of interest (ingress/egress)
Returns:
List[FirewallRule]: The list of rules
"""
self.__check_hook_active(program_type)
return self.__rules[program_type]
def delete_at(self, program_type: str, rule_id: int) -> str:
"""Function to delete a rule at a given position (ID)
Args:
program_type (str): The hook of interest (ingress/egress)
rule_id (int): The ID of the rule to be deleted
Raises:
IndexError: The provided ID is greater than the number of actual rules
Returns:
int: The rule ID
"""
self.__check_hook_active(program_type)
if rule_id >= len(self.__rules[program_type]):
raise IndexError("The Rule ID provided is wrong")
self.__rules[program_type].pop(rule_id)
word, offset_ok = (rule_id // 64, 64 - rule_id % 64)
prog = self[program_type]
# For each map, also for the WILDCARDS ones, iterate through every
# key-value and shift left the rules by 1, to remove the target one
for map_name in Firewall._ALL_MAPS:
for key, value in prog[map_name].items():
arr = value.rule_words
cnt_zeros = 0
carry = 0
# Starting from right to left
for w in range(self._rule_ids_word_per_entry - 1, word, -1):
cnt_zeros += int(arr[w] == 0)
tmp = carry
carry = arr[w] >> 63
arr[w] = (arr[w] << 1) | tmp
cnt_zeros += int(arr[word] == 0)
# If all zeros, then remove the entire entry
if cnt_zeros == self._rule_ids_word_per_entry:
del prog[map_name][key]
continue
# Finishing the current word, which has also the offset into account
ok = (arr[word] >> offset_ok) << offset_ok
to_shift = (arr[word] & (pow(2, offset_ok) - 1)) << 1 | carry
arr[word] = ok | to_shift
prog[map_name][key] = arr
prog['ACTIONS'][ct.c_uint32(
rule_id)] = ct.c_uint8(BPF.TC_ACT_OK if self.mode == BPF.SCHED_CLS else BPF.XDP_PASS)
return rule_id
def delete(self, program_type: str, rule: FirewallRule) -> str:
"""Function to delete a rule matching the provided one
Args:
program_type (str): The hook of interest (ingress/egress)
rule (FirewallRule): The rule to be deleted
Raises:
LookupError: If the rule does not match any of the present ones
Returns:
int: The ID of the deleted rule
"""
self.__check_hook_active(program_type)
if rule not in self.__rules[program_type]:
raise LookupError(
"Attempting to delete a rule which is not present")
return self.delete_at(program_type, self.__rules[program_type].index(rule))
def insert_at(self, program_type: str, rule_id: int, rule: FirewallRule) -> str:
"""Function to insert a rule at a given position. All the following ones are shifted
Args:
program_type (str): The hook of interest (ingress/egress)
rule_id (int): The ID of the rule (position)
rule (FirewallRule): The rule to be inserted
Raises:
LookupError: The new rule is already present
IndexError: The rule ID is greater than the actual number of rules
MemoryError: There is no room for more rules
Returns:
int: The ID of the rule
"""
self.__check_hook_active(program_type)
if rule in self.__rules[program_type]:
raise LookupError(
"Attempting to insert a rule which is already present")
if rule_id > len(self.__rules[program_type]):
raise IndexError("The Rule ID provided is wrong")
if rule_id == self._max_rules:
raise MemoryError("You reached the maximum amount of rules")
word, offset = (rule_id // 64, 63 - rule_id % 64)
offset_ok = offset + 1
prog = self[program_type]
# If the id is in the middle of the list, then all the following rules have
# to be shifted right by 1, for each map (also WILDCARDS)
if rule_id < len(self.__rules[program_type]):
for map_name in Firewall._ALL_MAPS:
for key, value in prog[map_name].items():
# Starting from left to right, thus the 1st word has also the offset
# into account
arr = value.rule_words
carry = arr[word] & 1
ok = (arr[word] >> offset_ok) << offset_ok
to_shift = (arr[word] & (pow(2, offset_ok) - 1)) >> 1
arr[word] = ok | to_shift
# Finishing all the other words
for w in range(word + 1, self._rule_ids_word_per_entry):
tmp = carry
carry = arr[w] & 1
arr[w] = (arr[w] >> 1) | (tmp << 63)
prog[map_name][key] = arr
# Insert into the maps, at the specific position the value 1, according
# to the values specified in the rule
for map_name, value in zip(Firewall._MAPS, Firewall.translate_rule(rule, prog['IPV4_SRC'].Key)):
if value is None:
map_name = f'{map_name}_WILDCARDS'
value = 0
if value in prog[map_name]:
arr = prog[map_name][value].rule_words
else:
arr = (ct.c_uint64 * self._rule_ids_word_per_entry)()
arr[word] |= (1 << offset)
prog[map_name][value] = arr
prog['ACTIONS'][ct.c_uint32(
rule_id)] = ct.c_uint8(BPF.TC_ACT_SHOT if self.mode == BPF.SCHED_CLS else BPF.XDP_DROP)
self.__rules[program_type].insert(rule_id, rule)
return rule_id
def insert(self, program_type: str, rule: FirewallRule) -> str:
"""Function to insert the rule given no position (append)
Args:
program_type (str): The hook of interest (ingress/egress)
rule (FirewallRule): The rule to be inserted
Returns:
int: The rule ID
"""
return self.insert_at(program_type, len(self.__rules[program_type]), rule)
def update(self, program_type: str, rule_id: int, rule: FirewallRule) -> str:
"""Function to update a specific rule given its ID
Args:
program_type (str): The hook of interest (ingress/egress)
rule_id (int): The ID of the rule to be updated
rule (FirewallRule): The new rule to be inserted
Raises:
LookupError: The new rule is already present
IndexError: The rule ID is greater than the actual number of rules
Returns:
int: The id of the rule
"""
self.__check_hook_active(program_type)
if rule in self.__rules[program_type]:
raise LookupError(
"Attempting to update a rule which is already present")
if rule_id >= len(self.__rules[program_type]):
raise IndexError("The Rule ID provided is wrong")
self.delete_at(program_type, rule_id)
self.insert_at(program_type, rule_id, rule)
return rule_id
def reset(self, program_type: str) -> str:
"""Function to reset the rules of the Firewall instance
Args:
program_type (str): The hook of interest (ingress/egress)
Returns:
int: The number of rules erased
"""
self.__check_hook_active(program_type)
ret = len(self.__rules[program_type])
self.__rules[program_type].clear()
for map_name in Firewall._ALL_MAPS:
self[program_type][map_name].clear()
return ret
@staticmethod
def translate_rule(rule: FirewallRule, LpmKey: Callable) -> List[any]:
"""Static function to translate a rule into values ready to be inserted in the eBPF maps.
Args:
rule (FirewallRule): The rule to be converted
LpmKey (Callable): The reference to the key structure to be invoked
Returns:
List[any]: List of converted fields using ctypes
"""
return [
translate_ip(rule.src) if rule.src else None,
translate_ip(rule.dst) if rule.dst else None,
ct.c_uint16(port_to_network_int(rule.sport)
) if rule.sport else None,
ct.c_uint16(port_to_network_int(rule.dport)
) if rule.dport else None,
ct.c_uint8(protocol_to_int(rule.l4proto)
) if rule.l4proto else None,
translate_flags(rule.tcpflags) if rule.tcpflags else None
]
| 40.739645
| 122
| 0.59557
|
# Copyright 2022 DeChainers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes as ct
from dataclasses import dataclass, field
from math import ceil
from typing import Callable, ClassVar, Dict, List
from dechainy.ebpf import BPF
from dechainy.exceptions import HookDisabledException
from dechainy.plugins import Probe
from dechainy.utility import (ipv4_to_network_int, port_to_network_int,
protocol_to_int)
@dataclass
class FirewallRule:
"""Class to represent a firewall iptable-like rule
Attributes:
src (str): The source address to match. Default None.
dst (str): The destination address to match. Default None.
sport (int): The source port to match. Default None.
dport (int): The destination port to match. Default None.
l4proto (str): The Layer 4 protocol to match. Default None.
tcpflags (str): A string containing the names of the TCP Flags to match. Default None.
"""
src: str = None
dst: str = None
sport: int = None
dport: int = None
l4proto: str = None
tcpflags: str = None
def __post_init__(self):
if not self.src and not self.dst and not self.sport and not self.dport and not self.l4proto and not self.tcpflags:
raise KeyError(
"Impossible inserting a rule without specifying at least a field")
@dataclass
class Firewall(Probe):
"""Firewall class, an eBPF implementation of an iptables-like one."""
# Size of the eBPF maps, how many entries can they accept
_MAPS: ClassVar[List[str]] = ['IPV4_SRC', 'IPV4_DST', 'PORT_SRC',
'PORT_DST', 'IP_PROTO', 'TCP_FLAGS']
_ALL_MAPS: ClassVar[List[str]] = _MAPS + [f"{x}_WILDCARDS" for x in _MAPS]
_rule_ids_word_per_entry: ClassVar[int] = 10
_max_rules: int = 1000
__rules: Dict[str, List[FirewallRule]] = field(default_factory=dict)
def __post_init__(self):
if not self.egress.required:
self.ingress.required = True
for hook in ["ingress", "egress"]:
conf = getattr(self, hook)
if conf.required:
conf.cflags = ['-DFW_ACTION_DEFAULT=DROP',
f'-DRULE_IDS_MAX_ENTRY={ceil(self._max_rules / self._rule_ids_word_per_entry)}',
f'-DMAX_RULES={self._max_rules}',
f'-DRULE_IDS_WORDS_PER_ENTRY={self._rule_ids_word_per_entry}']
if hook not in self.__rules:
self.__rules[hook] = []
super().__post_init__(path=__file__)
def post_compilation(self):
for hook in ["ingress", "egress"]:
if hook in self.__rules and self.__rules[hook]:
[self.insert(hook, x) for x in self.__rules[hook]]
def __check_hook_active(self, program_type):
if program_type not in self.__rules:
raise HookDisabledException(
f"The hook {program_type} is not active for this probe")
def get_at(self, program_type: str, rule_id: int) -> FirewallRule:
"""Function to retrieve a rule at a specific position
Args:
program_type (str): The hook of interest (ingress/egress)
rule_id (int): The ID of the rule
Raises:
IndexError: The provided ID is greater than the actual number of rules
Returns:
FirewallRule: The retrieved rule
"""
self.__check_hook_active(program_type)
if rule_id >= len(self.__rules[program_type]):
raise IndexError("The Rule ID provided is wrong")
return self.__rules[program_type][rule_id]
def get(self, program_type: str) -> str:
"""Function to retrieve all the rules for a specific hook
Args:
program_type (str): The hook of interest (ingress/egress)
Returns:
List[FirewallRule]: The list of rules
"""
self.__check_hook_active(program_type)
return self.__rules[program_type]
def delete_at(self, program_type: str, rule_id: int) -> str:
"""Function to delete a rule at a given position (ID)
Args:
program_type (str): The hook of interest (ingress/egress)
rule_id (int): The ID of the rule to be deleted
Raises:
IndexError: The provided ID is greater than the number of actual rules
Returns:
int: The rule ID
"""
self.__check_hook_active(program_type)
if rule_id >= len(self.__rules[program_type]):
raise IndexError("The Rule ID provided is wrong")
self.__rules[program_type].pop(rule_id)
word, offset_ok = (rule_id // 64, 64 - rule_id % 64)
prog = self[program_type]
# For each map, also for the WILDCARDS ones, iterate through every
# key-value and shift left the rules by 1, to remove the target one
for map_name in Firewall._ALL_MAPS:
for key, value in prog[map_name].items():
arr = value.rule_words
cnt_zeros = 0
carry = 0
# Starting from right to left
for w in range(self._rule_ids_word_per_entry - 1, word, -1):
cnt_zeros += int(arr[w] == 0)
tmp = carry
carry = arr[w] >> 63
arr[w] = (arr[w] << 1) | tmp
cnt_zeros += int(arr[word] == 0)
# If all zeros, then remove the entire entry
if cnt_zeros == self._rule_ids_word_per_entry:
del prog[map_name][key]
continue
# Finishing the current word, which has also the offset into account
ok = (arr[word] >> offset_ok) << offset_ok
to_shift = (arr[word] & (pow(2, offset_ok) - 1)) << 1 | carry
arr[word] = ok | to_shift
prog[map_name][key] = arr
prog['ACTIONS'][ct.c_uint32(
rule_id)] = ct.c_uint8(BPF.TC_ACT_OK if self.mode == BPF.SCHED_CLS else BPF.XDP_PASS)
return rule_id
def delete(self, program_type: str, rule: FirewallRule) -> str:
"""Function to delete a rule matching the provided one
Args:
program_type (str): The hook of interest (ingress/egress)
rule (FirewallRule): The rule to be deleted
Raises:
LookupError: If the rule does not match any of the present ones
Returns:
int: The ID of the deleted rule
"""
self.__check_hook_active(program_type)
if rule not in self.__rules[program_type]:
raise LookupError(
"Attempting to delete a rule which is not present")
return self.delete_at(program_type, self.__rules[program_type].index(rule))
def insert_at(self, program_type: str, rule_id: int, rule: FirewallRule) -> str:
"""Function to insert a rule at a given position. All the following ones are shifted
Args:
program_type (str): The hook of interest (ingress/egress)
rule_id (int): The ID of the rule (position)
rule (FirewallRule): The rule to be inserted
Raises:
LookupError: The new rule is already present
IndexError: The rule ID is greater than the actual number of rules
MemoryError: There is no room for more rules
Returns:
int: The ID of the rule
"""
self.__check_hook_active(program_type)
if rule in self.__rules[program_type]:
raise LookupError(
"Attempting to insert a rule which is already present")
if rule_id > len(self.__rules[program_type]):
raise IndexError("The Rule ID provided is wrong")
if rule_id == self._max_rules:
raise MemoryError("You reached the maximum amount of rules")
word, offset = (rule_id // 64, 63 - rule_id % 64)
offset_ok = offset + 1
prog = self[program_type]
# If the id is in the middle of the list, then all the following rules have
# to be shifted right by 1, for each map (also WILDCARDS)
if rule_id < len(self.__rules[program_type]):
for map_name in Firewall._ALL_MAPS:
for key, value in prog[map_name].items():
# Starting from left to right, thus the 1st word has also the offset
# into account
arr = value.rule_words
carry = arr[word] & 1
ok = (arr[word] >> offset_ok) << offset_ok
to_shift = (arr[word] & (pow(2, offset_ok) - 1)) >> 1
arr[word] = ok | to_shift
# Finishing all the other words
for w in range(word + 1, self._rule_ids_word_per_entry):
tmp = carry
carry = arr[w] & 1
arr[w] = (arr[w] >> 1) | (tmp << 63)
prog[map_name][key] = arr
# Insert into the maps, at the specific position the value 1, according
# to the values specified in the rule
for map_name, value in zip(Firewall._MAPS, Firewall.translate_rule(rule, prog['IPV4_SRC'].Key)):
if value is None:
map_name = f'{map_name}_WILDCARDS'
value = 0
if value in prog[map_name]:
arr = prog[map_name][value].rule_words
else:
arr = (ct.c_uint64 * self._rule_ids_word_per_entry)()
arr[word] |= (1 << offset)
prog[map_name][value] = arr
prog['ACTIONS'][ct.c_uint32(
rule_id)] = ct.c_uint8(BPF.TC_ACT_SHOT if self.mode == BPF.SCHED_CLS else BPF.XDP_DROP)
self.__rules[program_type].insert(rule_id, rule)
return rule_id
def insert(self, program_type: str, rule: FirewallRule) -> str:
"""Function to insert the rule given no position (append)
Args:
program_type (str): The hook of interest (ingress/egress)
rule (FirewallRule): The rule to be inserted
Returns:
int: The rule ID
"""
return self.insert_at(program_type, len(self.__rules[program_type]), rule)
def update(self, program_type: str, rule_id: int, rule: FirewallRule) -> str:
"""Function to update a specific rule given its ID
Args:
program_type (str): The hook of interest (ingress/egress)
rule_id (int): The ID of the rule to be updated
rule (FirewallRule): The new rule to be inserted
Raises:
LookupError: The new rule is already present
IndexError: The rule ID is greater than the actual number of rules
Returns:
int: The id of the rule
"""
self.__check_hook_active(program_type)
if rule in self.__rules[program_type]:
raise LookupError(
"Attempting to update a rule which is already present")
if rule_id >= len(self.__rules[program_type]):
raise IndexError("The Rule ID provided is wrong")
self.delete_at(program_type, rule_id)
self.insert_at(program_type, rule_id, rule)
return rule_id
def reset(self, program_type: str) -> str:
"""Function to reset the rules of the Firewall instance
Args:
program_type (str): The hook of interest (ingress/egress)
Returns:
int: The number of rules erased
"""
self.__check_hook_active(program_type)
ret = len(self.__rules[program_type])
self.__rules[program_type].clear()
for map_name in Firewall._ALL_MAPS:
self[program_type][map_name].clear()
return ret
@staticmethod
def translate_rule(rule: FirewallRule, LpmKey: Callable) -> List[any]:
"""Static function to translate a rule into values ready to be inserted in the eBPF maps.
Args:
rule (FirewallRule): The rule to be converted
LpmKey (Callable): The reference to the key structure to be invoked
Returns:
List[any]: List of converted fields using ctypes
"""
def translate_ip(ip: str):
tmp = ip.split("/")
return LpmKey(ct.c_uint32(int(tmp[1]) if len(tmp) == 2 else 32), ct.c_uint32(ipv4_to_network_int(tmp[0])))
def translate_flags(flags: str):
upper = flags.upper()
ret = 0
for i, f in enumerate(["FIN", "SYN", "RST", "PSH", "ACK", "URG", "ECE", "CWR"]):
if f in upper:
ret |= (1 << i)
return ct.c_uint8(ret)
return [
translate_ip(rule.src) if rule.src else None,
translate_ip(rule.dst) if rule.dst else None,
ct.c_uint16(port_to_network_int(rule.sport)
) if rule.sport else None,
ct.c_uint16(port_to_network_int(rule.dport)
) if rule.dport else None,
ct.c_uint8(protocol_to_int(rule.l4proto)
) if rule.l4proto else None,
translate_flags(rule.tcpflags) if rule.tcpflags else None
]
| 1,668
| 0
| 169
|
3427cff3c40bf62d3968f67131ef4ac40d2b5e58
| 1,174
|
py
|
Python
|
examples/rules/PropertiesTagsRequired.py
|
obobrova/cfn-python-lint
|
42c0cd89577a39e903e5ef8a337926cc7ff6822c
|
[
"MIT-0"
] | null | null | null |
examples/rules/PropertiesTagsRequired.py
|
obobrova/cfn-python-lint
|
42c0cd89577a39e903e5ef8a337926cc7ff6822c
|
[
"MIT-0"
] | 1
|
2020-04-15T16:36:10.000Z
|
2020-04-15T16:36:10.000Z
|
examples/rules/PropertiesTagsRequired.py
|
obobrova/cfn-python-lint
|
42c0cd89577a39e903e5ef8a337926cc7ff6822c
|
[
"MIT-0"
] | 1
|
2020-01-05T01:05:55.000Z
|
2020-01-05T01:05:55.000Z
|
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
class PropertiesTagsRequired(CloudFormationLintRule):
"""Check if Tags have required keys"""
id = 'E9000'
shortdesc = 'Tags have correct key values'
description = 'Check Tags for resources'
tags = ['resources', 'tags']
def match(self, cfn):
"""Check Tags for required keys"""
matches = []
required_tags = ['CostCenter', 'ApplicationName']
all_tags = cfn.search_deep_keys('Tags')
all_tags = [x for x in all_tags if x[0] == 'Resources']
for all_tag in all_tags:
all_keys = [d.get('Key') for d in all_tag[-1]]
for required_tag in required_tags:
if required_tag not in all_keys:
message = "Missing Tag {0} at {1}"
matches.append(
RuleMatch(
all_tag[:-1],
message.format(required_tag, '/'.join(map(str, all_tag[:-1])))))
return matches
| 32.611111
| 92
| 0.584327
|
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
class PropertiesTagsRequired(CloudFormationLintRule):
"""Check if Tags have required keys"""
id = 'E9000'
shortdesc = 'Tags have correct key values'
description = 'Check Tags for resources'
tags = ['resources', 'tags']
def match(self, cfn):
"""Check Tags for required keys"""
matches = []
required_tags = ['CostCenter', 'ApplicationName']
all_tags = cfn.search_deep_keys('Tags')
all_tags = [x for x in all_tags if x[0] == 'Resources']
for all_tag in all_tags:
all_keys = [d.get('Key') for d in all_tag[-1]]
for required_tag in required_tags:
if required_tag not in all_keys:
message = "Missing Tag {0} at {1}"
matches.append(
RuleMatch(
all_tag[:-1],
message.format(required_tag, '/'.join(map(str, all_tag[:-1])))))
return matches
| 0
| 0
| 0
|
bc7a93bfaac1d5f34fa4f91b691e774c1559b5da
| 5,227
|
py
|
Python
|
neko/modules/mongodb/warns_db.py
|
itzrexmodz/Carla-1
|
c775063a0ef0d144649ace6f428c37e6e8d2c092
|
[
"MIT"
] | 4
|
2021-12-26T11:42:45.000Z
|
2021-12-27T05:18:17.000Z
|
neko/modules/mongodb/warns_db.py
|
itzrexmodz/Carla-1
|
c775063a0ef0d144649ace6f428c37e6e8d2c092
|
[
"MIT"
] | null | null | null |
neko/modules/mongodb/warns_db.py
|
itzrexmodz/Carla-1
|
c775063a0ef0d144649ace6f428c37e6e8d2c092
|
[
"MIT"
] | 7
|
2021-12-26T14:49:37.000Z
|
2022-01-31T09:32:07.000Z
|
import datetime
from .. import db, dt_delta
warns = db.warn_s
settings = db.warn_settings
| 25.748768
| 88
| 0.521523
|
import datetime
from .. import db, dt_delta
warns = db.warn_s
settings = db.warn_settings
def warn_user(user_id, chat_id, reason=""):
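# Record a warning for the user in this chat; once the configured limit is
# reached the stored count and reasons are reset, and the returned tuple tells
# the caller which punishment (strength/time) to apply.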
_warn = warns.find_one({"chat_id": chat_id, "user_id": user_id})
if _warn:
reasons = _warn.get("reasons") or []
reasons.append(reason)
num_warns = _warn["num_warns"] + 1
else:
reasons = [reason]
num_warns = 1
rr = reasons
p = settings.find_one({"chat_id": chat_id})
if p and p.get("expire"):
h, m = dt_delta(p.get("expiretime"))
expire = True
expireafter = datetime.datetime.now() + datetime.timedelta(hours=h, minutes=m)
else:
expire = False
expireafter = 0
num_w = num_warns
if p and num_warns == p.get("limit"):
num_w = 0
reasons = []
elif not p and num_warns == 3:
num_w = 0
reasons = []
warns.update_one(
{"chat_id": chat_id, "user_id": user_id},
{
"$set": {
"reasons": reasons,
"num_warns": num_w,
"expire": expire,
"expireafter": expireafter,
}
},
upsert=True,
)
if p and num_warns == p.get("limit"):
return (
True,
p.get("strength"),
p.get("time"),
p.get("limit"),
num_warns,
rr or [],
)
elif not p and num_warns == 3:
return (True, "ban", 0, 3, num_warns, rr or [])
else:
return False, None, None, p.get("limit") or 3, num_w, []
def remove_warn(user_id, chat_id):
_warn = warns.find_one({"chat_id": chat_id, "user_id": user_id})
if _warn and _warn["num_warns"] > 0:
warns.update_one(
{"chat_id": chat_id, "user_id": user_id},
{"$set": {"num_warns": _warn["num_warns"] - 1}},
upsert=True,
)
return True
return False
def reset_warns(user_id, chat_id):
_warn = warns.find_one({"chat_id": chat_id, "user_id": user_id})
if _warn and _warn["num_warns"] > 0:
warns.update_one(
{"chat_id": chat_id, "user_id": user_id},
{"$set": {"num_warns": 0}},
upsert=True,
)
return True
return False
def get_warns(user_id, chat_id):
_warn = warns.find_one({"chat_id": chat_id, "user_id": user_id})
if _warn:
return _warn["num_warns"], _warn["reasons"]
return None
def reset_all_warns(chat_id):
warns.delete_one({"chat_id": chat_id})
def set_warn_limit(chat_id, limit=3):
_settings = settings.find_one({"chat_id": chat_id})
if _settings:
warn_strength = _settings.get("strength")
warn_time = _settings.get("time")
expire = _settings.get("expire")
expiretime = _settings.get("expiretime")
else:
warn_strength = "ban"
warn_time = 0
expire = False
expiretime = 0
settings.update_one(
{"chat_id": chat_id},
{
"$set": {
"limit": limit,
"strength": warn_strength,
"time": warn_time,
"expire": expire,
"expiretime": expiretime,
}
},
upsert=True,
)
def set_warn_strength(chat_id, mode, time=0):
_settings = settings.find_one({"chat_id": chat_id})
if _settings:
limit = _settings.get("limit")
expire = _settings.get("expire")
expiretime = _settings.get("expiretime")
else:
limit = 3
expire = False
expiretime = 0
settings.update_one(
{"chat_id": chat_id},
{
"$set": {
"limit": limit,
"strength": mode,
"time": time,
"expire": expire,
"expiretime": expiretime,
}
},
upsert=True,
)
def get_warn_strength(chat_id):
_s = settings.find_one({"chat_id": chat_id})
if _s:
return _s.get("strength"), _s.get("time")
return "ban", 0
def get_warn_limit(chat_id):
_s = settings.find_one({"chat_id": chat_id})
if _s:
return _s.get("limit")
return 3
def get_warn_settings(chat_id):
_s = settings.find_one({"chat_id": chat_id})
if _s:
return _s.get("limit"), _s.get("strength"), _s.get("time"), _s.get("expiretime")
return 3, "ban", 0, 0
def set_warn_expire(chat_id, time):
_s = settings.find_one({"chat_id": chat_id})
if _s:
strength = _s.get("strength")
warntime = _s.get("time")
limit = _s.get("limit")
else:
strength = "ban"
limit = 3
warntime = 0
if time != 0:
mode = True
else:
mode = False
settings.update_one(
{"chat_id": chat_id},
{
"$set": {
"limit": limit,
"strength": strength,
"time": warntime,
"expire": mode,
"expiretime": time,
}
},
upsert=True,
)
def get_warn_expire(chat_id):
_s = settings.find_one({"chat_id": chat_id})
if _s:
return _s.get("expire"), _s.get("expiretime")
return False, 0
| 4,847
| 0
| 276
|
b080b624d3d62e4c09ba172f0c46e7190732c3cd
| 345
|
py
|
Python
|
CondTools/L1Trigger/python/L1UniformTags_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
CondTools/L1Trigger/python/L1UniformTags_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
CondTools/L1Trigger/python/L1UniformTags_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
# print i, initL1UniformTags.tagBaseVec[ i ]
| 34.5
| 61
| 0.727536
|
def initL1UniformTags( tagBase = "IDEAL" ):
import FWCore.ParameterSet.Config as cms
from CondTools.L1Trigger.L1CondEnum_cfi import L1CondEnum
initL1UniformTags.tagBaseVec = []
for i in range( 0, L1CondEnum.NumL1Cond ):
initL1UniformTags.tagBaseVec.append( tagBase )
# print i, initL1UniformTags.tagBaseVec[ i ]
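# Hedged usage sketch (not in the original cfi): other L1 O2O config fragments would
# typically import and call this once, e.g.:
#   initL1UniformTags( tagBase = "IDEAL" )
#   # initL1UniformTags.tagBaseVec then holds one tag base string per L1CondEnum entry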
| 271
| 0
| 22
|
672442e7754fca2ff308af5c61785d64cbe504b0
| 1,398
|
py
|
Python
|
examples/example4.py
|
ZimmermanGroup/conformer-rl
|
beb98cbee6ba6efba686d7c6eebbf33fd737f279
|
[
"MIT"
] | 9
|
2021-09-03T18:46:46.000Z
|
2022-03-22T05:47:20.000Z
|
examples/example4.py
|
ZimmermanGroup/conformer-rl
|
beb98cbee6ba6efba686d7c6eebbf33fd737f279
|
[
"MIT"
] | 4
|
2021-07-15T03:57:26.000Z
|
2021-08-03T06:27:28.000Z
|
examples/example4.py
|
ZimmermanGroup/conformer-rl
|
beb98cbee6ba6efba686d7c6eebbf33fd737f279
|
[
"MIT"
] | 1
|
2022-03-17T01:59:36.000Z
|
2022-03-17T01:59:36.000Z
|
import numpy as np
import torch
from conformer_rl import utils
from conformer_rl.agents import A2CAgent
from conformer_rl.config import Config
from conformer_rl.environments import Task
from conformer_rl.models import RTGN
from conformer_rl.molecule_generation import test_alkane
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
import logging
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
utils.set_one_thread()
mol_config = test_alkane()
config = Config()
config.tag = 'example4'
config.network = RTGN(6, 128, edge_dim=6, node_dim=5).to(device)
# Batch Hyperparameters
config.num_workers = 10
config.rollout_length = 5
config.max_steps = 10000000
config.save_interval = config.num_workers*200*5
config.eval_interval = config.num_workers*200*5
config.eval_episodes = 2
# Coefficient Hyperparameters
lr = 5e-5 * np.sqrt(config.num_workers)
config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=lr, eps=1e-5)
# Task Settings
config.train_env = Task('GibbsScorePruningEnv-v0', concurrency=True, num_envs=config.num_workers, seed=np.random.randint(0,1e5), mol_config=mol_config, max_steps=200)
config.eval_env = Task('GibbsScorePruningEnv-v0', seed=np.random.randint(0,7e4), mol_config=mol_config, max_steps=200)
agent = A2CAgent(config)
agent.run_steps()
| 33.285714
| 170
| 0.752504
|
import numpy as np
import torch
from conformer_rl import utils
from conformer_rl.agents import A2CAgent
from conformer_rl.config import Config
from conformer_rl.environments import Task
from conformer_rl.models import RTGN
from conformer_rl.molecule_generation import test_alkane
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
import logging
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
utils.set_one_thread()
mol_config = test_alkane()
config = Config()
config.tag = 'example4'
config.network = RTGN(6, 128, edge_dim=6, node_dim=5).to(device)
# Batch Hyperparameters
config.num_workers = 10
config.rollout_length = 5
config.max_steps = 10000000
config.save_interval = config.num_workers*200*5
config.eval_interval = config.num_workers*200*5
config.eval_episodes = 2
# Coefficient Hyperparameters
lr = 5e-5 * np.sqrt(config.num_workers)
config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=lr, eps=1e-5)
# Task Settings
config.train_env = Task('GibbsScorePruningEnv-v0', concurrency=True, num_envs=config.num_workers, seed=np.random.randint(0,1e5), mol_config=mol_config, max_steps=200)
config.eval_env = Task('GibbsScorePruningEnv-v0', seed=np.random.randint(0,7e4), mol_config=mol_config, max_steps=200)
agent = A2CAgent(config)
agent.run_steps()
| 0
| 0
| 0
|
525a8b00826a337a4c293642d7c027ab056d2b82
| 2,259
|
py
|
Python
|
nlp/router.py
|
kirollosHossam/MachineLearningTask
|
3780513af04cf7bb97432436b4714c32d1c271e6
|
[
"MIT"
] | null | null | null |
nlp/router.py
|
kirollosHossam/MachineLearningTask
|
3780513af04cf7bb97432436b4714c32d1c271e6
|
[
"MIT"
] | null | null | null |
nlp/router.py
|
kirollosHossam/MachineLearningTask
|
3780513af04cf7bb97432436b4714c32d1c271e6
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import Dict, List, Optional, Union
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from nlp.nlp import Trainer
app = FastAPI()
trainer = Trainer()
# BaseModel is used as a data validator with FastAPI: it takes care of exception handling and
# validates your incoming JSON against the schema you define.
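# Minimal illustrative sketch (hypothetical, not part of this module) of such a validator:
#   class QuerySketch(BaseModel):
#       text: str
#   # FastAPI then rejects requests whose JSON body does not contain a string "text" field.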
@app.get("/status", summary="Get current status of the system")
@app.get("/trainMachineLearning", summary="Train a new Machine Learning model")
@app.get("/trainDeepLearning", summary="Train a new Deep Learning model")
@app.post("/predict", summary="Predict single input")
@app.post("/predict-batch", summary="predict a batch of sentences")
@app.get("/")
| 30.527027
| 107
| 0.715803
|
from __future__ import annotations
from typing import Dict, List, Optional, Union
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from nlp.nlp import Trainer
app = FastAPI()
trainer = Trainer()
# BaseModel is used as a data validator with FastAPI: it takes care of exception handling and
# validates your incoming JSON against the schema you define.
class TestingData(BaseModel):
texts: List[str]
class QueryText(BaseModel):
text: str
class StatusObject(BaseModel):
status: str
timestamp: str
classes: List[str]
evaluation: Dict
class PredictionObject(BaseModel):
text: str
predictions: Dict
class PredictionsObject(BaseModel):
predictions: List[PredictionObject]
@app.get("/status", summary="Get current status of the system")
def get_status():
status = trainer.get_status()
return StatusObject(**status)
@app.get("/trainMachineLearning", summary="Train a new Machine Learning model")
def train_machine_learning():
try:
trainer.trainMachineLearning(trainer.merge().text, trainer.merge().dialect)
status = trainer.get_status()
return StatusObject(**status)
except Exception as e:
raise HTTPException(status_code=503, detail=str(e))
@app.get("/trainDeepLearning", summary="Train a new Deep Learning model")
def train_deep_learning():
try:
trainer.trainDeepLearning(trainer.merge().text, trainer.merge().dialect)
status = trainer.get_status()
return StatusObject(**status)
except Exception as e:
raise HTTPException(status_code=503, detail=str(e))
@app.post("/predict", summary="Predict single input")
def predict(query_text: QueryText):
try:
prediction = trainer.predict([query_text.text])[0]
return PredictionObject(**prediction)
except Exception as e:
raise HTTPException(status_code=503, detail=str(e))
@app.post("/predict-batch", summary="predict a batch of sentences")
def predict_batch(testing_data:TestingData):
try:
predictions = trainer.predict(testing_data.texts)
return PredictionsObject(predictions=predictions)
except Exception as e:
raise HTTPException(status_code=503, detail=str(e))
@app.get("/")
def home():
return({"message": "System is up"})
| 1,035
| 240
| 247
|
5e0c029a0d28d903e75c0e982a55151bc9d0cc84
| 720
|
py
|
Python
|
piecrust/__init__.py
|
ludovicchabant/PieCrust2
|
89b2bf268bfdaae24ff6cf6d8c29c0b1239be739
|
[
"Apache-2.0"
] | 43
|
2015-04-24T05:30:04.000Z
|
2022-02-03T17:47:35.000Z
|
piecrust/__init__.py
|
ludovicchabant/PieCrust2
|
89b2bf268bfdaae24ff6cf6d8c29c0b1239be739
|
[
"Apache-2.0"
] | 54
|
2015-01-03T01:58:44.000Z
|
2021-05-06T21:56:26.000Z
|
piecrust/__init__.py
|
ludovicchabant/PieCrust2
|
89b2bf268bfdaae24ff6cf6d8c29c0b1239be739
|
[
"Apache-2.0"
] | 8
|
2015-05-10T01:50:46.000Z
|
2016-12-26T20:53:15.000Z
|
CACHE_DIR = '_cache'
ASSETS_DIR = 'assets'
TEMPLATES_DIR = 'templates'
THEME_DIR = 'theme'
THEMES_DIR = 'themes'
PLUGINS_DIR = 'plugins'
CONFIG_PATH = 'config.yml'
THEME_CONFIG_PATH = 'theme_config.yml'
THEME_INFO_PATH = 'theme_info.yml'
ASSET_DIR_SUFFIX = '-assets'
DEFAULT_FORMAT = 'markdown'
DEFAULT_TEMPLATE_ENGINE = 'jinja2'
DEFAULT_POSTS_FS = 'flat'
DEFAULT_DATE_FORMAT = '%b %d, %Y'
DEFAULT_THEME_SOURCE = 'https://bitbucket.org/ludovicchabant/'
PIECRUST_URL = 'https://bolt80.com/piecrust/'
CACHE_VERSION = 34
try:
from piecrust.__version__ import APP_VERSION
except ImportError:
APP_VERSION = 'unknown'
import os.path # NOQA
RESOURCES_DIR = os.path.join(os.path.dirname(__file__), 'resources')
| 22.5
| 68
| 0.754167
|
CACHE_DIR = '_cache'
ASSETS_DIR = 'assets'
TEMPLATES_DIR = 'templates'
THEME_DIR = 'theme'
THEMES_DIR = 'themes'
PLUGINS_DIR = 'plugins'
CONFIG_PATH = 'config.yml'
THEME_CONFIG_PATH = 'theme_config.yml'
THEME_INFO_PATH = 'theme_info.yml'
ASSET_DIR_SUFFIX = '-assets'
DEFAULT_FORMAT = 'markdown'
DEFAULT_TEMPLATE_ENGINE = 'jinja2'
DEFAULT_POSTS_FS = 'flat'
DEFAULT_DATE_FORMAT = '%b %d, %Y'
DEFAULT_THEME_SOURCE = 'https://bitbucket.org/ludovicchabant/'
PIECRUST_URL = 'https://bolt80.com/piecrust/'
CACHE_VERSION = 34
try:
from piecrust.__version__ import APP_VERSION
except ImportError:
APP_VERSION = 'unknown'
import os.path # NOQA
RESOURCES_DIR = os.path.join(os.path.dirname(__file__), 'resources')
| 0
| 0
| 0
|
c7dc32f321e6e4a28ef4432c2172548e6b4b2753
| 709
|
py
|
Python
|
installib/obfuscation_checker.py
|
ivanprjcts/installib
|
2baa4b34cad1672b3e11700c2e3c5ea17a4dbc53
|
[
"Apache-2.0"
] | null | null | null |
installib/obfuscation_checker.py
|
ivanprjcts/installib
|
2baa4b34cad1672b3e11700c2e3c5ea17a4dbc53
|
[
"Apache-2.0"
] | null | null | null |
installib/obfuscation_checker.py
|
ivanprjcts/installib
|
2baa4b34cad1672b3e11700c2e3c5ea17a4dbc53
|
[
"Apache-2.0"
] | null | null | null |
import os
import hashlib
| 29.541667
| 108
| 0.603667
|
import os
import hashlib
def _hashfile(filepath):
sha1 = hashlib.sha1()
f = open(filepath, 'rb')
try:
sha1.update(f.read())
finally:
f.close()
return sha1.hexdigest()
def _equal_hash(file1, file2):
return _hashfile(file1) == _hashfile(file2)
def assert_not_equal_hash(path_to_file1, path_to_file2):
assert os.path.isfile(path_to_file1), "%s not found" % path_to_file1
assert os.path.isfile(path_to_file2), "%s not found" % path_to_file2
    assert not _equal_hash(path_to_file1, path_to_file2), "Oops, %s and %s files are equal" % (path_to_file1,
path_to_file2)
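# Hedged usage sketch (hypothetical file names, not part of the original module):
#   assert_not_equal_hash('dist/app.obfuscated.py', 'src/app.py')
#   # raises AssertionError if the two files hash identically, i.e. no obfuscation happened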
| 612
| 0
| 69
|
15018b8b4e7500f24e9cb9d0490678a32711cf09
| 23,199
|
py
|
Python
|
kopf/reactor/handling.py
|
damc-dev/kopf
|
7c344fc682759bbf0647da60933021a75b3862d3
|
[
"MIT"
] | null | null | null |
kopf/reactor/handling.py
|
damc-dev/kopf
|
7c344fc682759bbf0647da60933021a75b3862d3
|
[
"MIT"
] | null | null | null |
kopf/reactor/handling.py
|
damc-dev/kopf
|
7c344fc682759bbf0647da60933021a75b3862d3
|
[
"MIT"
] | null | null | null |
"""
Conversion of low-level events to high-level causes, and handling them.
These functions are invoked from the queueing module `kopf.reactor.queueing`,
which is the actual event loop of the operator process.
The conversion of the low-level events to the high-level causes is done by
checking the object's state and comparing it to the preserved last-seen state.
The framework itself makes the necessary changes to the object, -- such as the
finalizers attachment, last-seen state updates, and handler status tracking, --
thus provoking the low-level watch-events and additional queueing calls.
But these internal changes are filtered out from the cause detection
and therefore do not trigger the user-defined handlers.
"""
import asyncio
import collections.abc
import datetime
from contextvars import ContextVar
from typing import Optional, Iterable, Collection, Any
from kopf.clients import patching
from kopf.engines import logging as logging_engine
from kopf.engines import posting
from kopf.engines import sleeping
from kopf.reactor import causation
from kopf.reactor import invocation
from kopf.reactor import lifecycles
from kopf.reactor import registries
from kopf.reactor import state
from kopf.structs import bodies
from kopf.structs import dicts
from kopf.structs import diffs
from kopf.structs import finalizers
from kopf.structs import lastseen
from kopf.structs import patches
from kopf.structs import resources
WAITING_KEEPALIVE_INTERVAL = 10 * 60
""" How often to wake up from the long sleep, to show the liveliness. """
DEFAULT_RETRY_DELAY = 1 * 60
""" The default delay duration for the regular exception in retry-mode. """
class PermanentError(Exception):
""" A fatal handler error, the retries are useless. """
class TemporaryError(Exception):
""" A potentially recoverable error, should be retried. """
class HandlerTimeoutError(PermanentError):
""" An error for the handler's timeout (if set). """
class HandlerChildrenRetry(TemporaryError):
""" An internal pseudo-error to retry for the next sub-handlers attempt. """
# The task-local context; propagated down the stack instead of multiple kwargs.
# Used in `@kopf.on.this` and `kopf.execute()` to add/get the sub-handlers.
sublifecycle_var: ContextVar[lifecycles.LifeCycleFn] = ContextVar('sublifecycle_var')
subregistry_var: ContextVar[registries.ResourceRegistry] = ContextVar('subregistry_var')
subexecuted_var: ContextVar[bool] = ContextVar('subexecuted_var')
handler_var: ContextVar[registries.ResourceHandler] = ContextVar('handler_var')
cause_var: ContextVar[causation.BaseCause] = ContextVar('cause_var')
async def resource_handler(
lifecycle: lifecycles.LifeCycleFn,
registry: registries.OperatorRegistry,
resource: resources.Resource,
event: bodies.Event,
freeze: asyncio.Event,
replenished: asyncio.Event,
event_queue: posting.K8sEventQueue,
) -> None:
"""
Handle a single custom object low-level watch-event.
Convert the low-level events, as provided by the watching/queueing tasks,
to the high-level causes, and then call the cause-handling logic.
All the internally provoked changes are intercepted, do not create causes,
and therefore do not call the handling logic.
"""
body: bodies.Body = event['object']
patch: patches.Patch = patches.Patch()
delay: Optional[float] = None
# Each object has its own prefixed logger, to distinguish parallel handling.
logger = logging_engine.ObjectLogger(body=body)
posting.event_queue_loop_var.set(asyncio.get_running_loop())
posting.event_queue_var.set(event_queue) # till the end of this object's task.
# If the global freeze is set for the processing (i.e. other operator overrides), do nothing.
if freeze.is_set():
logger.debug("Ignoring the events due to freeze.")
return
# Invoke all silent spies. No causation, no progress storage is performed.
if registry.has_resource_watching_handlers(resource=resource):
resource_watching_cause = causation.detect_resource_watching_cause(
event=event,
resource=resource,
logger=logger,
patch=patch,
)
await handle_resource_watching_cause(
lifecycle=lifecycles.all_at_once,
registry=registry,
cause=resource_watching_cause,
)
# Object patch accumulator. Populated by the methods. Applied in the end of the handler.
# Detect the cause and handle it (or at least log this happened).
if registry.has_resource_changing_handlers(resource=resource):
extra_fields = registry.get_extra_fields(resource=resource)
old, new, diff = lastseen.get_essential_diffs(body=body, extra_fields=extra_fields)
resource_changing_cause = causation.detect_resource_changing_cause(
event=event,
resource=resource,
logger=logger,
patch=patch,
old=old,
new=new,
diff=diff,
requires_finalizer=registry.requires_finalizer(resource=resource, body=body),
)
delay = await handle_resource_changing_cause(
lifecycle=lifecycle,
registry=registry,
cause=resource_changing_cause,
)
# Whatever was done, apply the accumulated changes to the object.
# But only once, to reduce the number of API calls and the generated irrelevant events.
if patch:
logger.debug("Patching with: %r", patch)
await patching.patch_obj(resource=resource, patch=patch, body=body)
# Sleep strictly after patching, never before -- to keep the status proper.
# The patching above, if done, interrupts the sleep instantly, so we skip it at all.
if delay and not patch:
logger.debug(f"Sleeping for {delay} seconds for the delayed handlers.")
unslept = await sleeping.sleep_or_wait(delay, replenished)
if unslept is not None:
logger.debug(f"Sleeping was interrupted by new changes, {unslept} seconds left.")
else:
now = datetime.datetime.utcnow()
dummy = patches.Patch({'status': {'kopf': {'dummy': now.isoformat()}}})
logger.debug("Provoking reaction with: %r", dummy)
await patching.patch_obj(resource=resource, patch=dummy, body=body)
async def handle_resource_watching_cause(
lifecycle: lifecycles.LifeCycleFn,
registry: registries.OperatorRegistry,
cause: causation.ResourceWatchingCause,
) -> None:
"""
Handle a received event, log but ignore all errors.
This is a lightweight version of the cause handling, but for the raw events,
without any progress persistence. Multi-step calls are also not supported.
If the handler fails, it fails and is never retried.
Note: K8s-event posting is skipped for `kopf.on.event` handlers,
as they should be silent. Still, the messages are logged normally.
"""
logger = cause.logger
handlers = registry.get_resource_watching_handlers(cause=cause)
for handler in handlers:
# The exceptions are handled locally and are not re-raised, to keep the operator running.
try:
logger.debug(f"Invoking handler {handler.id!r}.")
result = await _call_handler(
handler,
cause=cause,
lifecycle=lifecycle,
)
except Exception:
logger.exception(f"Handler {handler.id!r} failed with an exception. Will ignore.", local=True)
else:
logger.info(f"Handler {handler.id!r} succeeded.", local=True)
state.store_result(patch=cause.patch, handler=handler, result=result)
async def handle_resource_changing_cause(
lifecycle: lifecycles.LifeCycleFn,
registry: registries.OperatorRegistry,
cause: causation.ResourceChangingCause,
) -> Optional[float]:
"""
Handle a detected cause, as part of the bigger handler routine.
"""
logger = cause.logger
patch = cause.patch # TODO get rid of this alias
body = cause.body # TODO get rid of this alias
delay = None
done = None
skip = None
# Regular causes invoke the handlers.
if cause.reason in causation.HANDLER_REASONS:
title = causation.TITLES.get(cause.reason, repr(cause.reason))
logger.debug(f"{title.capitalize()} event: %r", body)
if cause.diff is not None and cause.old is not None and cause.new is not None:
logger.debug(f"{title.capitalize()} diff: %r", cause.diff)
handlers = registry.get_resource_changing_handlers(cause=cause)
if handlers:
try:
await _execute(
lifecycle=lifecycle,
handlers=handlers,
cause=cause,
)
except HandlerChildrenRetry as e:
# on the top-level, no patches -- it is pre-patched.
delay = e.delay
done = False
else:
logger.info(f"All handlers succeeded for {title}.")
done = True
else:
skip = True
# Regular causes also do some implicit post-handling when all handlers are done.
if done or skip:
extra_fields = registry.get_extra_fields(resource=cause.resource)
lastseen.refresh_essence(body=body, patch=patch, extra_fields=extra_fields)
if done:
state.purge_progress(body=body, patch=patch)
if cause.reason == causation.Reason.DELETE:
logger.debug("Removing the finalizer, thus allowing the actual deletion.")
finalizers.remove_finalizers(body=body, patch=patch)
# Informational causes just print the log lines.
if cause.reason == causation.Reason.GONE:
logger.debug("Deleted, really deleted, and we are notified.")
if cause.reason == causation.Reason.FREE:
logger.debug("Deletion event, but we are done with it, and we do not care.")
if cause.reason == causation.Reason.NOOP:
logger.debug("Something has changed, but we are not interested (state is the same).")
# For the case of a newly created object, or one that doesn't have the correct
# finalizers, lock it to this operator. Not all newly created objects will
# produce an 'ACQUIRE' causation event. This only happens when there are
# mandatory deletion handlers registered for the given object, i.e. if finalizers
# are required.
if cause.reason == causation.Reason.ACQUIRE:
logger.debug("Adding the finalizer, thus preventing the actual deletion.")
finalizers.append_finalizers(body=body, patch=patch)
# Remove finalizers from an object, since the object currently has finalizers, but
# shouldn't, thus releasing the locking of the object to this operator.
if cause.reason == causation.Reason.RELEASE:
logger.debug("Removing the finalizer, as there are no handlers requiring it.")
finalizers.remove_finalizers(body=body, patch=patch)
# The delay is then consumed by the main handling routine (in different ways).
return delay
async def execute(
*,
fns: Optional[Iterable[invocation.Invokable]] = None,
handlers: Optional[Iterable[registries.ResourceHandler]] = None,
registry: Optional[registries.ResourceRegistry] = None,
lifecycle: Optional[lifecycles.LifeCycleFn] = None,
cause: Optional[causation.BaseCause] = None,
) -> None:
"""
Execute the handlers in an isolated lifecycle.
This function is just a public wrapper for `execute` with multiple
ways to specify the handlers: either as the raw functions, or as the
pre-created handlers, or as a registry (as used in the object handling).
If no explicit functions or handlers or registry are passed,
the sub-handlers of the current handler are assumed, as accumulated
in the per-handler registry with ``@kopf.on.this``.
If the call to this method for the sub-handlers is not done explicitly
in the handler, it is done implicitly after the handler is exited.
One way or another, it is executed for the sub-handlers.
"""
# Restore the current context as set in the handler execution cycle.
lifecycle = lifecycle if lifecycle is not None else sublifecycle_var.get()
cause = cause if cause is not None else cause_var.get()
handler: registries.ResourceHandler = handler_var.get()
# Validate the inputs; the function signatures cannot put these kind of restrictions, so we do.
if len([v for v in [fns, handlers, registry] if v is not None]) > 1:
raise TypeError("Only one of the fns, handlers, registry can be passed. Got more.")
elif fns is not None and isinstance(fns, collections.abc.Mapping):
registry = registries.ResourceRegistry(prefix=handler.id if handler else None)
for id, fn in fns.items():
registry.register(fn=fn, id=id)
elif fns is not None and isinstance(fns, collections.abc.Iterable):
registry = registries.ResourceRegistry(prefix=handler.id if handler else None)
for fn in fns:
registry.register(fn=fn)
elif fns is not None:
raise ValueError(f"fns must be a mapping or an iterable, got {fns.__class__}.")
elif handlers is not None:
registry = registries.ResourceRegistry(prefix=handler.id if handler else None)
for handler in handlers:
registry.append(handler=handler)
# Use the registry as is; assume that the caller knows what they do.
elif registry is not None:
pass
# Prevent double implicit execution.
elif subexecuted_var.get():
return
# If no explicit args were passed, implicitly use the accumulated handlers from `@kopf.on.this`.
else:
subexecuted_var.set(True)
registry = subregistry_var.get()
# The sub-handlers are only for upper-level causes, not for lower-level events.
if not isinstance(cause, causation.ResourceChangingCause):
raise RuntimeError("Sub-handlers of event-handlers are not supported and have "
"no practical use (there are no retries or state tracking).")
# Execute the real handlers (all or few or one of them, as per the lifecycle).
# Raises `HandlerChildrenRetry` if the execute should be continued on the next iteration.
await _execute(
lifecycle=lifecycle,
handlers=registry.get_resource_changing_handlers(cause=cause),
cause=cause,
)
async def _execute(
lifecycle: lifecycles.LifeCycleFn,
handlers: Collection[registries.ResourceHandler],
cause: causation.BaseCause,
retry_on_errors: bool = True,
) -> None:
"""
Call the next handler(s) from the chain of the handlers.
Keep the record on the progression of the handlers in the object's status,
    and use it on the next invocation to determine which handler(s) to call.
This routine is used both for the global handlers (via global registry),
and for the sub-handlers (via a simple registry of the current handler).
Raises `HandlerChildrenRetry` if there are children handlers to be executed
on the next call, and implicitly provokes such a call by making the changes
to the status fields (on the handler progression and number of retries).
Exits normally if all handlers for this cause are fully done.
"""
logger = cause.logger
# Filter and select the handlers to be executed right now, on this event reaction cycle.
handlers_done = [h for h in handlers if state.is_finished(body=cause.body, handler=h)]
handlers_wait = [h for h in handlers if state.is_sleeping(body=cause.body, handler=h)]
handlers_todo = [h for h in handlers if state.is_awakened(body=cause.body, handler=h)]
handlers_plan = [h for h in await invocation.invoke(lifecycle, handlers_todo, cause=cause)]
handlers_left = [h for h in handlers_todo if h.id not in {h.id for h in handlers_plan}]
# Set the timestamps -- even if not executed on this event, but just got registered.
for handler in handlers:
if not state.is_started(body=cause.body, handler=handler):
state.set_start_time(body=cause.body, patch=cause.patch, handler=handler)
# Execute all planned (selected) handlers in one event reaction cycle, even if there are few.
for handler in handlers_plan:
# Restore the handler's progress status. It can be useful in the handlers.
retry = state.get_retry_count(body=cause.body, handler=handler)
started = state.get_start_time(body=cause.body, handler=handler, patch=cause.patch)
runtime = datetime.datetime.utcnow() - (started if started else datetime.datetime.utcnow())
# The exceptions are handled locally and are not re-raised, to keep the operator running.
try:
logger.debug(f"Invoking handler {handler.id!r}.")
if handler.timeout is not None and runtime.total_seconds() > handler.timeout:
raise HandlerTimeoutError(f"Handler {handler.id!r} has timed out after {runtime}.")
result = await _call_handler(
handler,
cause=cause,
retry=retry,
started=started,
runtime=runtime,
lifecycle=lifecycle, # just a default for the sub-handlers, not used directly.
)
# Unfinished children cause the regular retry, but with less logging and event reporting.
except HandlerChildrenRetry as e:
logger.debug(f"Handler {handler.id!r} has unfinished sub-handlers. Will retry soon.")
state.set_retry_time(body=cause.body, patch=cause.patch, handler=handler, delay=e.delay)
handlers_left.append(handler)
# Definitely a temporary error, regardless of the error strictness.
except TemporaryError as e:
logger.error(f"Handler {handler.id!r} failed temporarily: %s", str(e) or repr(e))
state.set_retry_time(body=cause.body, patch=cause.patch, handler=handler, delay=e.delay)
handlers_left.append(handler)
# Same as permanent errors below, but with better logging for our internal cases.
except HandlerTimeoutError as e:
logger.error(f"%s", str(e) or repr(e)) # already formatted
state.store_failure(body=cause.body, patch=cause.patch, handler=handler, exc=e)
# TODO: report the handling failure somehow (beside logs/events). persistent status?
# Definitely a permanent error, regardless of the error strictness.
except PermanentError as e:
logger.error(f"Handler {handler.id!r} failed permanently: %s", str(e) or repr(e))
state.store_failure(body=cause.body, patch=cause.patch, handler=handler, exc=e)
# TODO: report the handling failure somehow (beside logs/events). persistent status?
# Regular errors behave as either temporary or permanent depending on the error strictness.
except Exception as e:
if retry_on_errors:
logger.exception(f"Handler {handler.id!r} failed with an exception. Will retry.")
state.set_retry_time(body=cause.body, patch=cause.patch, handler=handler, delay=DEFAULT_RETRY_DELAY)
handlers_left.append(handler)
else:
logger.exception(f"Handler {handler.id!r} failed with an exception. Will stop.")
state.store_failure(body=cause.body, patch=cause.patch, handler=handler, exc=e)
# TODO: report the handling failure somehow (beside logs/events). persistent status?
# No errors means the handler should be excluded from future runs in this reaction cycle.
else:
logger.info(f"Handler {handler.id!r} succeeded.")
state.store_success(body=cause.body, patch=cause.patch, handler=handler, result=result)
# Provoke the retry of the handling cycle if there were any unfinished handlers,
# either because they were not selected by the lifecycle, or failed and need a retry.
if handlers_left:
raise HandlerChildrenRetry(delay=None)
# If there are delayed handlers, block this object's cycle; but do keep-alives every few mins.
    # Other (non-delayed) handlers will continue as normal, due to the raise a few lines above.
    # Other objects will continue as normal in their own handling asyncio tasks.
if handlers_wait:
now = datetime.datetime.utcnow()
limit = now + datetime.timedelta(seconds=WAITING_KEEPALIVE_INTERVAL)
times = [state.get_awake_time(body=cause.body, handler=h) for h in handlers_wait]
until = min([t for t in times if t is not None] + [limit]) # the soonest awake datetime.
delay = max(0, (until - now).total_seconds())
raise HandlerChildrenRetry(delay=delay)
async def _call_handler(
handler: registries.ResourceHandler,
*args: Any,
cause: causation.BaseCause,
lifecycle: lifecycles.LifeCycleFn,
**kwargs: Any,
) -> Any:
"""
Invoke one handler only, according to the calling conventions.
Specifically, calculate the handler-specific fields (e.g. field diffs).
Ensure the global context for this asyncio task is set to the handler and
its cause -- for proper population of the sub-handlers via the decorators
(see `@kopf.on.this`).
"""
# For the field-handlers, the old/new/diff values must match the field, not the whole object.
if isinstance(cause, causation.ResourceChangingCause) and handler.field is not None:
old = dicts.resolve(cause.old, handler.field, None, assume_empty=True)
new = dicts.resolve(cause.new, handler.field, None, assume_empty=True)
diff = diffs.reduce(cause.diff, handler.field)
cause = causation.enrich_cause(cause=cause, old=old, new=new, diff=diff)
# Store the context of the current resource-object-event-handler, to be used in `@kopf.on.this`,
# and maybe other places, and consumed in the recursive `execute()` calls for the children.
# This replaces the multiple kwargs passing through the whole call stack (easy to forget).
with invocation.context([
(sublifecycle_var, lifecycle),
(subregistry_var, registries.ResourceRegistry(prefix=handler.id)),
(subexecuted_var, False),
(handler_var, handler),
(cause_var, cause),
]):
# And call it. If the sub-handlers are not called explicitly, run them implicitly
# as if it was done inside of the handler (i.e. under try-finally block).
result = await invocation.invoke(
handler.fn,
*args,
cause=cause,
**kwargs,
)
if not subexecuted_var.get() and isinstance(cause, causation.ResourceChangingCause):
await execute()
return result
| 44.357553
| 116
| 0.687357
|
"""
Conversion of low-level events to high-level causes, and handling them.
These functions are invoked from the queueing module `kopf.reactor.queueing`,
which is the actual event loop of the operator process.
The conversion of the low-level events to the high-level causes is done by
checking the object's state and comparing it to the preserved last-seen state.
The framework itself makes the necessary changes to the object, -- such as the
finalizers attachment, last-seen state updates, and handler status tracking, --
thus provoking the low-level watch-events and additional queueing calls.
But these internal changes are filtered out from the cause detection
and therefore do not trigger the user-defined handlers.
"""
import asyncio
import collections.abc
import datetime
from contextvars import ContextVar
from typing import Optional, Iterable, Collection, Any
from kopf.clients import patching
from kopf.engines import logging as logging_engine
from kopf.engines import posting
from kopf.engines import sleeping
from kopf.reactor import causation
from kopf.reactor import invocation
from kopf.reactor import lifecycles
from kopf.reactor import registries
from kopf.reactor import state
from kopf.structs import bodies
from kopf.structs import dicts
from kopf.structs import diffs
from kopf.structs import finalizers
from kopf.structs import lastseen
from kopf.structs import patches
from kopf.structs import resources
WAITING_KEEPALIVE_INTERVAL = 10 * 60
""" How often to wake up from the long sleep, to show the liveliness. """
DEFAULT_RETRY_DELAY = 1 * 60
""" The default delay duration for the regular exception in retry-mode. """
class PermanentError(Exception):
""" A fatal handler error, the retries are useless. """
class TemporaryError(Exception):
""" A potentially recoverable error, should be retried. """
def __init__(
self,
__msg: Optional[str] = None,
delay: Optional[float] = DEFAULT_RETRY_DELAY,
):
super().__init__(__msg)
self.delay = delay
class HandlerTimeoutError(PermanentError):
""" An error for the handler's timeout (if set). """
class HandlerChildrenRetry(TemporaryError):
""" An internal pseudo-error to retry for the next sub-handlers attempt. """
# The task-local context; propagated down the stack instead of multiple kwargs.
# Used in `@kopf.on.this` and `kopf.execute()` to add/get the sub-handlers.
sublifecycle_var: ContextVar[lifecycles.LifeCycleFn] = ContextVar('sublifecycle_var')
subregistry_var: ContextVar[registries.ResourceRegistry] = ContextVar('subregistry_var')
subexecuted_var: ContextVar[bool] = ContextVar('subexecuted_var')
handler_var: ContextVar[registries.ResourceHandler] = ContextVar('handler_var')
cause_var: ContextVar[causation.BaseCause] = ContextVar('cause_var')
async def resource_handler(
lifecycle: lifecycles.LifeCycleFn,
registry: registries.OperatorRegistry,
resource: resources.Resource,
event: bodies.Event,
freeze: asyncio.Event,
replenished: asyncio.Event,
event_queue: posting.K8sEventQueue,
) -> None:
"""
Handle a single custom object low-level watch-event.
Convert the low-level events, as provided by the watching/queueing tasks,
to the high-level causes, and then call the cause-handling logic.
All the internally provoked changes are intercepted, do not create causes,
and therefore do not call the handling logic.
"""
body: bodies.Body = event['object']
patch: patches.Patch = patches.Patch()
delay: Optional[float] = None
# Each object has its own prefixed logger, to distinguish parallel handling.
logger = logging_engine.ObjectLogger(body=body)
posting.event_queue_loop_var.set(asyncio.get_running_loop())
posting.event_queue_var.set(event_queue) # till the end of this object's task.
# If the global freeze is set for the processing (i.e. other operator overrides), do nothing.
if freeze.is_set():
logger.debug("Ignoring the events due to freeze.")
return
# Invoke all silent spies. No causation, no progress storage is performed.
if registry.has_resource_watching_handlers(resource=resource):
resource_watching_cause = causation.detect_resource_watching_cause(
event=event,
resource=resource,
logger=logger,
patch=patch,
)
await handle_resource_watching_cause(
lifecycle=lifecycles.all_at_once,
registry=registry,
cause=resource_watching_cause,
)
# Object patch accumulator. Populated by the methods. Applied in the end of the handler.
# Detect the cause and handle it (or at least log this happened).
if registry.has_resource_changing_handlers(resource=resource):
extra_fields = registry.get_extra_fields(resource=resource)
old, new, diff = lastseen.get_essential_diffs(body=body, extra_fields=extra_fields)
resource_changing_cause = causation.detect_resource_changing_cause(
event=event,
resource=resource,
logger=logger,
patch=patch,
old=old,
new=new,
diff=diff,
requires_finalizer=registry.requires_finalizer(resource=resource, body=body),
)
delay = await handle_resource_changing_cause(
lifecycle=lifecycle,
registry=registry,
cause=resource_changing_cause,
)
# Whatever was done, apply the accumulated changes to the object.
# But only once, to reduce the number of API calls and the generated irrelevant events.
if patch:
logger.debug("Patching with: %r", patch)
await patching.patch_obj(resource=resource, patch=patch, body=body)
# Sleep strictly after patching, never before -- to keep the status proper.
# The patching above, if done, interrupts the sleep instantly, so we skip it at all.
if delay and not patch:
logger.debug(f"Sleeping for {delay} seconds for the delayed handlers.")
unslept = await sleeping.sleep_or_wait(delay, replenished)
if unslept is not None:
logger.debug(f"Sleeping was interrupted by new changes, {unslept} seconds left.")
else:
now = datetime.datetime.utcnow()
dummy = patches.Patch({'status': {'kopf': {'dummy': now.isoformat()}}})
logger.debug("Provoking reaction with: %r", dummy)
await patching.patch_obj(resource=resource, patch=dummy, body=body)
async def handle_resource_watching_cause(
lifecycle: lifecycles.LifeCycleFn,
registry: registries.OperatorRegistry,
cause: causation.ResourceWatchingCause,
) -> None:
"""
Handle a received event, log but ignore all errors.
This is a lightweight version of the cause handling, but for the raw events,
without any progress persistence. Multi-step calls are also not supported.
If the handler fails, it fails and is never retried.
Note: K8s-event posting is skipped for `kopf.on.event` handlers,
as they should be silent. Still, the messages are logged normally.
"""
logger = cause.logger
handlers = registry.get_resource_watching_handlers(cause=cause)
for handler in handlers:
# The exceptions are handled locally and are not re-raised, to keep the operator running.
try:
logger.debug(f"Invoking handler {handler.id!r}.")
result = await _call_handler(
handler,
cause=cause,
lifecycle=lifecycle,
)
except Exception:
logger.exception(f"Handler {handler.id!r} failed with an exception. Will ignore.", local=True)
else:
logger.info(f"Handler {handler.id!r} succeeded.", local=True)
state.store_result(patch=cause.patch, handler=handler, result=result)
async def handle_resource_changing_cause(
lifecycle: lifecycles.LifeCycleFn,
registry: registries.OperatorRegistry,
cause: causation.ResourceChangingCause,
) -> Optional[float]:
"""
Handle a detected cause, as part of the bigger handler routine.
"""
logger = cause.logger
patch = cause.patch # TODO get rid of this alias
body = cause.body # TODO get rid of this alias
delay = None
done = None
skip = None
# Regular causes invoke the handlers.
if cause.reason in causation.HANDLER_REASONS:
title = causation.TITLES.get(cause.reason, repr(cause.reason))
logger.debug(f"{title.capitalize()} event: %r", body)
if cause.diff is not None and cause.old is not None and cause.new is not None:
logger.debug(f"{title.capitalize()} diff: %r", cause.diff)
handlers = registry.get_resource_changing_handlers(cause=cause)
if handlers:
try:
await _execute(
lifecycle=lifecycle,
handlers=handlers,
cause=cause,
)
except HandlerChildrenRetry as e:
# on the top-level, no patches -- it is pre-patched.
delay = e.delay
done = False
else:
logger.info(f"All handlers succeeded for {title}.")
done = True
else:
skip = True
# Regular causes also do some implicit post-handling when all handlers are done.
if done or skip:
extra_fields = registry.get_extra_fields(resource=cause.resource)
lastseen.refresh_essence(body=body, patch=patch, extra_fields=extra_fields)
if done:
state.purge_progress(body=body, patch=patch)
if cause.reason == causation.Reason.DELETE:
logger.debug("Removing the finalizer, thus allowing the actual deletion.")
finalizers.remove_finalizers(body=body, patch=patch)
# Informational causes just print the log lines.
if cause.reason == causation.Reason.GONE:
logger.debug("Deleted, really deleted, and we are notified.")
if cause.reason == causation.Reason.FREE:
logger.debug("Deletion event, but we are done with it, and we do not care.")
if cause.reason == causation.Reason.NOOP:
logger.debug("Something has changed, but we are not interested (state is the same).")
# For the case of a newly created object, or one that doesn't have the correct
# finalizers, lock it to this operator. Not all newly created objects will
# produce an 'ACQUIRE' causation event. This only happens when there are
# mandatory deletion handlers registered for the given object, i.e. if finalizers
# are required.
if cause.reason == causation.Reason.ACQUIRE:
logger.debug("Adding the finalizer, thus preventing the actual deletion.")
finalizers.append_finalizers(body=body, patch=patch)
# Remove finalizers from an object, since the object currently has finalizers, but
# shouldn't, thus releasing the locking of the object to this operator.
if cause.reason == causation.Reason.RELEASE:
logger.debug("Removing the finalizer, as there are no handlers requiring it.")
finalizers.remove_finalizers(body=body, patch=patch)
# The delay is then consumed by the main handling routine (in different ways).
return delay
async def execute(
*,
fns: Optional[Iterable[invocation.Invokable]] = None,
handlers: Optional[Iterable[registries.ResourceHandler]] = None,
registry: Optional[registries.ResourceRegistry] = None,
lifecycle: Optional[lifecycles.LifeCycleFn] = None,
cause: Optional[causation.BaseCause] = None,
) -> None:
"""
Execute the handlers in an isolated lifecycle.
This function is just a public wrapper for `execute` with multiple
ways to specify the handlers: either as the raw functions, or as the
pre-created handlers, or as a registry (as used in the object handling).
If no explicit functions or handlers or registry are passed,
the sub-handlers of the current handler are assumed, as accumulated
in the per-handler registry with ``@kopf.on.this``.
If the call to this method for the sub-handlers is not done explicitly
in the handler, it is done implicitly after the handler is exited.
One way or another, it is executed for the sub-handlers.
"""
# Restore the current context as set in the handler execution cycle.
lifecycle = lifecycle if lifecycle is not None else sublifecycle_var.get()
cause = cause if cause is not None else cause_var.get()
handler: registries.ResourceHandler = handler_var.get()
# Validate the inputs; the function signatures cannot put these kind of restrictions, so we do.
if len([v for v in [fns, handlers, registry] if v is not None]) > 1:
raise TypeError("Only one of the fns, handlers, registry can be passed. Got more.")
elif fns is not None and isinstance(fns, collections.abc.Mapping):
registry = registries.ResourceRegistry(prefix=handler.id if handler else None)
for id, fn in fns.items():
registry.register(fn=fn, id=id)
elif fns is not None and isinstance(fns, collections.abc.Iterable):
registry = registries.ResourceRegistry(prefix=handler.id if handler else None)
for fn in fns:
registry.register(fn=fn)
elif fns is not None:
raise ValueError(f"fns must be a mapping or an iterable, got {fns.__class__}.")
elif handlers is not None:
registry = registries.ResourceRegistry(prefix=handler.id if handler else None)
for handler in handlers:
registry.append(handler=handler)
# Use the registry as is; assume that the caller knows what they do.
elif registry is not None:
pass
# Prevent double implicit execution.
elif subexecuted_var.get():
return
# If no explicit args were passed, implicitly use the accumulated handlers from `@kopf.on.this`.
else:
subexecuted_var.set(True)
registry = subregistry_var.get()
# The sub-handlers are only for upper-level causes, not for lower-level events.
if not isinstance(cause, causation.ResourceChangingCause):
raise RuntimeError("Sub-handlers of event-handlers are not supported and have "
"no practical use (there are no retries or state tracking).")
# Execute the real handlers (all or few or one of them, as per the lifecycle).
# Raises `HandlerChildrenRetry` if the execute should be continued on the next iteration.
await _execute(
lifecycle=lifecycle,
handlers=registry.get_resource_changing_handlers(cause=cause),
cause=cause,
)
async def _execute(
lifecycle: lifecycles.LifeCycleFn,
handlers: Collection[registries.ResourceHandler],
cause: causation.BaseCause,
retry_on_errors: bool = True,
) -> None:
"""
Call the next handler(s) from the chain of the handlers.
Keep the record on the progression of the handlers in the object's status,
    and use it on the next invocation to determine which handler(s) to call.
This routine is used both for the global handlers (via global registry),
and for the sub-handlers (via a simple registry of the current handler).
Raises `HandlerChildrenRetry` if there are children handlers to be executed
on the next call, and implicitly provokes such a call by making the changes
to the status fields (on the handler progression and number of retries).
Exits normally if all handlers for this cause are fully done.
"""
logger = cause.logger
# Filter and select the handlers to be executed right now, on this event reaction cycle.
handlers_done = [h for h in handlers if state.is_finished(body=cause.body, handler=h)]
handlers_wait = [h for h in handlers if state.is_sleeping(body=cause.body, handler=h)]
handlers_todo = [h for h in handlers if state.is_awakened(body=cause.body, handler=h)]
handlers_plan = [h for h in await invocation.invoke(lifecycle, handlers_todo, cause=cause)]
handlers_left = [h for h in handlers_todo if h.id not in {h.id for h in handlers_plan}]
# Set the timestamps -- even if not executed on this event, but just got registered.
for handler in handlers:
if not state.is_started(body=cause.body, handler=handler):
state.set_start_time(body=cause.body, patch=cause.patch, handler=handler)
# Execute all planned (selected) handlers in one event reaction cycle, even if there are few.
for handler in handlers_plan:
# Restore the handler's progress status. It can be useful in the handlers.
retry = state.get_retry_count(body=cause.body, handler=handler)
started = state.get_start_time(body=cause.body, handler=handler, patch=cause.patch)
runtime = datetime.datetime.utcnow() - (started if started else datetime.datetime.utcnow())
# The exceptions are handled locally and are not re-raised, to keep the operator running.
try:
logger.debug(f"Invoking handler {handler.id!r}.")
if handler.timeout is not None and runtime.total_seconds() > handler.timeout:
raise HandlerTimeoutError(f"Handler {handler.id!r} has timed out after {runtime}.")
result = await _call_handler(
handler,
cause=cause,
retry=retry,
started=started,
runtime=runtime,
lifecycle=lifecycle, # just a default for the sub-handlers, not used directly.
)
# Unfinished children cause the regular retry, but with less logging and event reporting.
except HandlerChildrenRetry as e:
logger.debug(f"Handler {handler.id!r} has unfinished sub-handlers. Will retry soon.")
state.set_retry_time(body=cause.body, patch=cause.patch, handler=handler, delay=e.delay)
handlers_left.append(handler)
# Definitely a temporary error, regardless of the error strictness.
except TemporaryError as e:
logger.error(f"Handler {handler.id!r} failed temporarily: %s", str(e) or repr(e))
state.set_retry_time(body=cause.body, patch=cause.patch, handler=handler, delay=e.delay)
handlers_left.append(handler)
# Same as permanent errors below, but with better logging for our internal cases.
except HandlerTimeoutError as e:
logger.error(f"%s", str(e) or repr(e)) # already formatted
state.store_failure(body=cause.body, patch=cause.patch, handler=handler, exc=e)
# TODO: report the handling failure somehow (beside logs/events). persistent status?
# Definitely a permanent error, regardless of the error strictness.
except PermanentError as e:
logger.error(f"Handler {handler.id!r} failed permanently: %s", str(e) or repr(e))
state.store_failure(body=cause.body, patch=cause.patch, handler=handler, exc=e)
# TODO: report the handling failure somehow (beside logs/events). persistent status?
# Regular errors behave as either temporary or permanent depending on the error strictness.
except Exception as e:
if retry_on_errors:
logger.exception(f"Handler {handler.id!r} failed with an exception. Will retry.")
state.set_retry_time(body=cause.body, patch=cause.patch, handler=handler, delay=DEFAULT_RETRY_DELAY)
handlers_left.append(handler)
else:
logger.exception(f"Handler {handler.id!r} failed with an exception. Will stop.")
state.store_failure(body=cause.body, patch=cause.patch, handler=handler, exc=e)
# TODO: report the handling failure somehow (beside logs/events). persistent status?
# No errors means the handler should be excluded from future runs in this reaction cycle.
else:
logger.info(f"Handler {handler.id!r} succeeded.")
state.store_success(body=cause.body, patch=cause.patch, handler=handler, result=result)
# Provoke the retry of the handling cycle if there were any unfinished handlers,
# either because they were not selected by the lifecycle, or failed and need a retry.
if handlers_left:
raise HandlerChildrenRetry(delay=None)
# If there are delayed handlers, block this object's cycle; but do keep-alives every few mins.
    # Other (non-delayed) handlers will continue as normal, due to the raise a few lines above.
    # Other objects will continue as normal in their own handling asyncio tasks.
if handlers_wait:
now = datetime.datetime.utcnow()
limit = now + datetime.timedelta(seconds=WAITING_KEEPALIVE_INTERVAL)
times = [state.get_awake_time(body=cause.body, handler=h) for h in handlers_wait]
until = min([t for t in times if t is not None] + [limit]) # the soonest awake datetime.
delay = max(0, (until - now).total_seconds())
raise HandlerChildrenRetry(delay=delay)
async def _call_handler(
handler: registries.ResourceHandler,
*args: Any,
cause: causation.BaseCause,
lifecycle: lifecycles.LifeCycleFn,
**kwargs: Any,
) -> Any:
"""
Invoke one handler only, according to the calling conventions.
Specifically, calculate the handler-specific fields (e.g. field diffs).
Ensure the global context for this asyncio task is set to the handler and
its cause -- for proper population of the sub-handlers via the decorators
(see `@kopf.on.this`).
"""
# For the field-handlers, the old/new/diff values must match the field, not the whole object.
if isinstance(cause, causation.ResourceChangingCause) and handler.field is not None:
old = dicts.resolve(cause.old, handler.field, None, assume_empty=True)
new = dicts.resolve(cause.new, handler.field, None, assume_empty=True)
diff = diffs.reduce(cause.diff, handler.field)
cause = causation.enrich_cause(cause=cause, old=old, new=new, diff=diff)
# Store the context of the current resource-object-event-handler, to be used in `@kopf.on.this`,
# and maybe other places, and consumed in the recursive `execute()` calls for the children.
# This replaces the multiple kwargs passing through the whole call stack (easy to forget).
with invocation.context([
(sublifecycle_var, lifecycle),
(subregistry_var, registries.ResourceRegistry(prefix=handler.id)),
(subexecuted_var, False),
(handler_var, handler),
(cause_var, cause),
]):
# And call it. If the sub-handlers are not called explicitly, run them implicitly
# as if it was done inside of the handler (i.e. under try-finally block).
result = await invocation.invoke(
handler.fn,
*args,
cause=cause,
**kwargs,
)
if not subexecuted_var.get() and isinstance(cause, causation.ResourceChangingCause):
await execute()
return result
| 175
| 0
| 26
|
2bb069623fadcbb035f2202aa16c63ec3c42a1ed
| 448
|
py
|
Python
|
snipy/scope_factory.py
|
dade-u/snipy
|
408520867179f99b3158b57520e2619f3fecd69b
|
[
"MIT"
] | 1
|
2017-08-09T09:29:22.000Z
|
2017-08-09T09:29:22.000Z
|
snipy/scope_factory.py
|
dade-u/snipy
|
408520867179f99b3158b57520e2619f3fecd69b
|
[
"MIT"
] | null | null | null |
snipy/scope_factory.py
|
dade-u/snipy
|
408520867179f99b3158b57520e2619f3fecd69b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from contextlib import contextmanager
# from snipy.odict import odict
_stack = [None]
@contextmanager
| 14.933333
| 37
| 0.631696
|
# -*- coding: utf-8 -*-
from contextlib import contextmanager
# from snipy.odict import odict
_stack = [None]
class Scope(object):
def __init__(self, name):
self.name = name
@contextmanager
def vscope(name):
# get current scope or new scope
    # append hierarchical information
sc = Scope(name)
_stack.append(sc)
try:
yield sc
finally:
_stack.pop(-1)
def get_scope():
return _stack[-1]
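# Hedged usage sketch (not part of the original module):
#   with vscope('encoder') as sc:
#       assert get_scope() is sc   # the innermost scope is visible inside the block
#   assert get_scope() is None     # and popped off the stack again afterwards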
| 223
| -1
| 94
|
10334a7fe4170894486c6af95188eedeca86ef92
| 55
|
py
|
Python
|
listener/normal/custom/__init__.py
|
andymckay/arecibo
|
eb6787ea0a276047ef5add2df67a4dd051e5c961
|
[
"Apache-2.0"
] | 6
|
2016-01-26T04:47:52.000Z
|
2022-01-24T19:55:04.000Z
|
listener/normal/custom/__init__.py
|
andymckay/arecibo
|
eb6787ea0a276047ef5add2df67a4dd051e5c961
|
[
"Apache-2.0"
] | 6
|
2017-02-12T05:11:25.000Z
|
2017-02-12T05:12:15.000Z
|
listener/normal/custom/__init__.py
|
andymckay/arecibo
|
eb6787ea0a276047ef5add2df67a4dd051e5c961
|
[
"Apache-2.0"
] | 2
|
2015-12-09T22:37:58.000Z
|
2021-09-09T17:04:33.000Z
|
try:
import listeners
except ImportError:
pass
| 11
| 20
| 0.709091
|
try:
import listeners
except ImportError:
pass
| 0
| 0
| 0
|
bb668dcb39e4acf30a4628fb7b8d11525437ea6c
| 1,212
|
py
|
Python
|
setup.py
|
philip-hash/MTN
|
ce9b3bf585ceb8342261313ab527ffbcdf34c0b5
|
[
"MIT"
] | null | null | null |
setup.py
|
philip-hash/MTN
|
ce9b3bf585ceb8342261313ab527ffbcdf34c0b5
|
[
"MIT"
] | null | null | null |
setup.py
|
philip-hash/MTN
|
ce9b3bf585ceb8342261313ab527ffbcdf34c0b5
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages, Extension
from pathlib import Path
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
VERSION = '0.0.6'
DESCRIPTION = 'Library used to Normalize numerical time-series data'
LONG_DESCRIPTION = 'A package that allows you to efficiently normalize numerical time-series data without losing long-term memory dependency, retaining as much information from the data as possible.'
# Setting up
setup(
name="MRN",
version=VERSION,
author="Philip Pankaj",
author_email="<philip.pankaj@gmail.com>",
description=DESCRIPTION,
long_description_content_type='text/markdown',
long_description=long_description,
packages=find_packages(),
install_requires=['numpy'],
keywords=['python', 'normalization', 'standardization', 'long term memory', 'LTM', 'LTM Normalization'],
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
)
| 37.875
| 191
| 0.688944
|
from setuptools import setup, find_packages, Extension
from pathlib import Path
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
VERSION = '0.0.6'
DESCRIPTION = 'Library used to Normalize numerical time-series data'
LONG_DESCRIPTION = 'A package that allows you to efficiently normalize numerical time-series data without losing long-term memory dependency, retaining as much information from the data as possible.'
# Setting up
setup(
name="MRN",
version=VERSION,
author="Philip Pankaj",
author_email="<philip.pankaj@gmail.com>",
description=DESCRIPTION,
long_description_content_type='text/markdown',
long_description=long_description,
packages=find_packages(),
install_requires=['numpy'],
keywords=['python', 'normalization', 'standardization', 'long term memory', 'LTM', 'LTM Normalization'],
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
)
| 0
| 0
| 0
|
ed4d6ecf028e692369dd51a8ba6206a33a31e1bf
| 536
|
py
|
Python
|
app/grandchallenge/hanging_protocols/urls.py
|
comic/comic-django
|
4f534fae2c7d2102e94991667398aef12394e32e
|
[
"Apache-2.0"
] | 7
|
2016-11-05T07:16:30.000Z
|
2017-11-23T03:38:03.000Z
|
app/grandchallenge/hanging_protocols/urls.py
|
comic/comic-django
|
4f534fae2c7d2102e94991667398aef12394e32e
|
[
"Apache-2.0"
] | 113
|
2015-05-26T09:27:59.000Z
|
2018-03-21T10:45:56.000Z
|
app/grandchallenge/hanging_protocols/urls.py
|
comic/comic-django
|
4f534fae2c7d2102e94991667398aef12394e32e
|
[
"Apache-2.0"
] | 7
|
2015-07-16T20:11:22.000Z
|
2017-06-06T02:41:24.000Z
|
from django.urls import path
from grandchallenge.hanging_protocols.views import (
HangingProtocolCreate,
HangingProtocolDetail,
HangingProtocolList,
HangingProtocolUpdate,
)
app_name = "hanging-protocols"
urlpatterns = [
path("", HangingProtocolList.as_view(), name="list"),
path("create/", HangingProtocolCreate.as_view(), name="create"),
path("<slug:slug>/", HangingProtocolDetail.as_view(), name="detail"),
path("<slug>/update/", HangingProtocolUpdate.as_view(), name="update"),
]
| 29.777778
| 76
| 0.699627
|
from django.urls import path
from grandchallenge.hanging_protocols.views import (
HangingProtocolCreate,
HangingProtocolDetail,
HangingProtocolList,
HangingProtocolUpdate,
)
app_name = "hanging-protocols"
urlpatterns = [
path("", HangingProtocolList.as_view(), name="list"),
path("create/", HangingProtocolCreate.as_view(), name="create"),
path("<slug:slug>/", HangingProtocolDetail.as_view(), name="detail"),
path("<slug>/update/", HangingProtocolUpdate.as_view(), name="update"),
]
| 0
| 0
| 0
|
4850fded809f5c4c03a4fe3d475ebddb547c8910
| 12,026
|
py
|
Python
|
train.py
|
taowenyin/PatchNetVLAD
|
b0f63c8de7677bc27c8fa7760c1bdfb89bc0cfec
|
[
"MIT"
] | 1
|
2021-08-10T02:24:00.000Z
|
2021-08-10T02:24:00.000Z
|
train.py
|
taowenyin/PatchNetVLAD
|
b0f63c8de7677bc27c8fa7760c1bdfb89bc0cfec
|
[
"MIT"
] | null | null | null |
train.py
|
taowenyin/PatchNetVLAD
|
b0f63c8de7677bc27c8fa7760c1bdfb89bc0cfec
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
MIT License
Copyright (c) 2021 Stephen Hausler, Sourav Garg, Ming Xu, Michael Milford and Tobias Fischer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Significant parts of our code are based on [Nanne's pytorch-netvlad repository]
(https://github.com/Nanne/pytorch-NetVlad/), as well as some parts from the [Mapillary SLS repository]
(https://github.com/mapillary/mapillary_sls)
This code trains the NetVLAD neural network used to extract Patch-NetVLAD features.
'''
from __future__ import print_function
import argparse
import configparser
import os
import random
import shutil
from os.path import join, isfile
from os import makedirs
from datetime import datetime
import tempfile
import torch
import torch.nn as nn
import torch.optim as optim
import h5py
from tensorboardX import SummaryWriter
import numpy as np
from patchnetvlad.training_tools.train_epoch import train_epoch
from patchnetvlad.training_tools.val import val
from patchnetvlad.training_tools.get_clusters import get_clusters
from patchnetvlad.training_tools.tools import save_checkpoint
from patchnetvlad.tools.datasets import input_transform
from patchnetvlad.models.models_generic import get_backend, get_model
from patchnetvlad.tools import PATCHNETVLAD_ROOT_DIR
from tqdm.auto import trange
from patchnetvlad.training_tools.msls import MSLS
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Patch-NetVLAD-train')
parser.add_argument('--config_path', type=str, default=join(PATCHNETVLAD_ROOT_DIR, 'configs/train.ini'),
help='File name (with extension) to an ini file that stores most of the configuration data for patch-netvlad')
parser.add_argument('--cache_path', type=str, default=tempfile.mkdtemp(),
help='Path to save cache, centroid data to.')
parser.add_argument('--save_path', type=str, default='',
help='Path to save checkpoints to')
parser.add_argument('--resume_path', type=str, default='',
help='Full path and name (with extension) to load checkpoint from, for resuming training.')
parser.add_argument('--cluster_path', type=str, default='',
help='Full path and name (with extension) to load cluster data from, for resuming training.')
parser.add_argument('--dataset_root_dir', type=str, default='/work/qvpr/data/raw/Mapillary_Street_Level_Sequences',
help='Root directory of dataset')
parser.add_argument('--identifier', type=str, default='mapillary_nopanos',
help='Description of this model, e.g. mapillary_nopanos_vgg16_netvlad')
parser.add_argument('--nEpochs', type=int, default=30, help='number of epochs to train for')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--save_every_epoch', action='store_true', help='Flag to set a separate checkpoint file for each new epoch')
parser.add_argument('--threads', type=int, default=6, help='Number of threads for each data loader to use')
parser.add_argument('--nocuda', action='store_true', help='If true, use CPU only. Else use GPU.')
# os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
opt = parser.parse_args()
print(opt)
configfile = opt.config_path
assert os.path.isfile(configfile)
config = configparser.ConfigParser()
config.read(configfile)
cuda = not opt.nocuda
if cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run with --nocuda")
device = torch.device("cuda" if cuda else "cpu")
random.seed(int(config['train']['seed']))
np.random.seed(int(config['train']['seed']))
torch.manual_seed(int(config['train']['seed']))
if cuda:
# noinspection PyUnresolvedReferences
torch.cuda.manual_seed(int(config['train']['seed']))
optimizer = None
scheduler = None
print('===> Building model')
encoder_dim, encoder = get_backend()
if opt.resume_path: # if already started training earlier and continuing
if isfile(opt.resume_path):
print("=> loading checkpoint '{}'".format(opt.resume_path))
checkpoint = torch.load(opt.resume_path, map_location=lambda storage, loc: storage)
config['global_params']['num_clusters'] = str(checkpoint['state_dict']['pool.centroids'].shape[0])
model = get_model(encoder, encoder_dim, opt, config['global_params'], append_pca_layer=False)
model.load_state_dict(checkpoint['state_dict'])
opt.start_epoch = checkpoint['epoch']
print("=> loaded checkpoint '{}'".format(opt.resume_path, ))
else:
raise FileNotFoundError("=> no checkpoint found at '{}'".format(opt.resume_path))
else: # if not, assume fresh training instance and will initially generate cluster centroids
print('===> Loading model')
config['global_params']['num_clusters'] = config['train']['num_clusters']
model = get_model(encoder, encoder_dim, opt, config['global_params'], append_pca_layer=False)
initcache = join(opt.cache_path, 'centroids', 'vgg16_' + 'mapillary_' + config['train'][
'num_clusters'] + '_desc_cen.hdf5')
if opt.cluster_path:
if isfile(opt.cluster_path):
if opt.cluster_path != initcache:
shutil.copyfile(opt.cluster_path, initcache)
else:
raise FileNotFoundError("=> no cluster data found at '{}'".format(opt.cluster_path))
else:
print('===> Finding cluster centroids')
print('===> Loading dataset(s) for clustering')
train_dataset = MSLS(opt.dataset_root_dir, mode='test', cities='train', transform=input_transform(),
bs=int(config['train']['cachebatchsize']), threads=opt.threads,
margin=float(config['train']['margin']))
model = model.to(device)
print('===> Calculating descriptors and clusters')
get_clusters(train_dataset, model, encoder_dim, device, opt, config)
            # copy the file
shutil.copyfile(initcache, initcache + '.bk')
os.remove(initcache)
os.rename(initcache + '.bk', initcache)
# a little hacky, but needed to easily run init_params
model = model.to(device="cpu")
with h5py.File(initcache, mode='r') as h5:
clsts = h5.get("centroids")[...]
traindescs = h5.get("descriptors")[...]
model.pool.init_params(clsts, traindescs)
del clsts, traindescs
isParallel = False
if int(config['global_params']['nGPU']) > 1 and torch.cuda.device_count() > 1:
model.encoder = nn.DataParallel(model.encoder)
model.pool = nn.DataParallel(model.pool)
isParallel = True
if config['train']['optim'] == 'ADAM':
optimizer = optim.Adam(filter(lambda par: par.requires_grad,
model.parameters()), lr=float(config['train']['lr'])) # , betas=(0,0.9))
elif config['train']['optim'] == 'SGD':
optimizer = optim.SGD(filter(lambda par: par.requires_grad,
model.parameters()), lr=float(config['train']['lr']),
momentum=float(config['train']['momentum']),
weight_decay=float(config['train']['weightDecay']))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=int(config['train']['lrstep']),
gamma=float(config['train']['lrgamma']))
else:
raise ValueError('Unknown optimizer: ' + config['train']['optim'])
criterion = nn.TripletMarginLoss(margin=float(config['train']['margin']) ** 0.5, p=2, reduction='sum').to(device)
model = model.to(device)
if opt.resume_path:
optimizer.load_state_dict(checkpoint['optimizer'])
print('===> Loading dataset(s)')
    exclude_panos_training = not config['train'].getboolean('includepanos')
train_dataset = MSLS(opt.dataset_root_dir, mode='train', nNeg=int(config['train']['nNeg']), transform=input_transform(),
bs=int(config['train']['cachebatchsize']), threads=opt.threads, margin=float(config['train']['margin']),
                         exclude_panos=exclude_panos_training)
validation_dataset = MSLS(opt.dataset_root_dir, mode='val', transform=input_transform(),
bs=int(config['train']['cachebatchsize']), threads=opt.threads,
margin=float(config['train']['margin']), posDistThr=25)
print('===> Training query set:', len(train_dataset.qIdx))
print('===> Evaluating on val set, query count:', len(validation_dataset.qIdx))
print('===> Training model')
writer = SummaryWriter(
log_dir=join(opt.save_path, datetime.now().strftime('%b%d_%H-%M-%S') + '_' + opt.identifier))
# write checkpoints in logdir
logdir = writer.file_writer.get_logdir()
opt.save_file_path = join(logdir, 'checkpoints')
makedirs(opt.save_file_path)
not_improved = 0
best_score = 0
if opt.resume_path:
not_improved = checkpoint['not_improved']
best_score = checkpoint['best_score']
for epoch in trange(opt.start_epoch + 1, opt.nEpochs + 1, desc='Epoch number'.rjust(15), position=0):
train_epoch(train_dataset, model, optimizer, criterion, encoder_dim, device, epoch, opt, config, writer)
if scheduler is not None:
scheduler.step(epoch)
if (epoch % int(config['train']['evalevery'])) == 0:
recalls = val(validation_dataset, model, encoder_dim, device, opt, config, writer, epoch,
write_tboard=True, pbar_position=1)
is_best = recalls[5] > best_score
if is_best:
not_improved = 0
best_score = recalls[5]
else:
not_improved += 1
save_checkpoint({
'epoch': epoch,
'state_dict': model.state_dict(),
'recalls': recalls,
'best_score': best_score,
'not_improved': not_improved,
'optimizer': optimizer.state_dict(),
'parallel': isParallel,
}, opt, is_best)
if int(config['train']['patience']) > 0 and not_improved > (int(config['train']['patience']) / int(config['train']['evalevery'])):
print('Performance did not improve for', config['train']['patience'], 'epochs. Stopping.')
break
print("=> Best Recall@5: {:.4f}".format(best_score), flush=True)
writer.close()
torch.cuda.empty_cache() # garbage clean GPU memory, a bug can occur when Pytorch doesn't automatically clear the
# memory after runs
print('Done')
| 45.041199
| 142
| 0.651339
|
#!/usr/bin/env python
'''
MIT License
Copyright (c) 2021 Stephen Hausler, Sourav Garg, Ming Xu, Michael Milford and Tobias Fischer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Significant parts of our code are based on [Nanne's pytorch-netvlad repository]
(https://github.com/Nanne/pytorch-NetVlad/), as well as some parts from the [Mapillary SLS repository]
(https://github.com/mapillary/mapillary_sls)
This code trains the NetVLAD neural network used to extract Patch-NetVLAD features.
'''
from __future__ import print_function
import argparse
import configparser
import os
import random
import shutil
from os.path import join, isfile
from os import makedirs
from datetime import datetime
import tempfile
import torch
import torch.nn as nn
import torch.optim as optim
import h5py
from tensorboardX import SummaryWriter
import numpy as np
from patchnetvlad.training_tools.train_epoch import train_epoch
from patchnetvlad.training_tools.val import val
from patchnetvlad.training_tools.get_clusters import get_clusters
from patchnetvlad.training_tools.tools import save_checkpoint
from patchnetvlad.tools.datasets import input_transform
from patchnetvlad.models.models_generic import get_backend, get_model
from patchnetvlad.tools import PATCHNETVLAD_ROOT_DIR
from tqdm.auto import trange
from patchnetvlad.training_tools.msls import MSLS
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Patch-NetVLAD-train')
parser.add_argument('--config_path', type=str, default=join(PATCHNETVLAD_ROOT_DIR, 'configs/train.ini'),
help='File name (with extension) to an ini file that stores most of the configuration data for patch-netvlad')
parser.add_argument('--cache_path', type=str, default=tempfile.mkdtemp(),
help='Path to save cache, centroid data to.')
parser.add_argument('--save_path', type=str, default='',
help='Path to save checkpoints to')
parser.add_argument('--resume_path', type=str, default='',
help='Full path and name (with extension) to load checkpoint from, for resuming training.')
parser.add_argument('--cluster_path', type=str, default='',
help='Full path and name (with extension) to load cluster data from, for resuming training.')
parser.add_argument('--dataset_root_dir', type=str, default='/work/qvpr/data/raw/Mapillary_Street_Level_Sequences',
help='Root directory of dataset')
parser.add_argument('--identifier', type=str, default='mapillary_nopanos',
help='Description of this model, e.g. mapillary_nopanos_vgg16_netvlad')
parser.add_argument('--nEpochs', type=int, default=30, help='number of epochs to train for')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--save_every_epoch', action='store_true', help='Flag to set a separate checkpoint file for each new epoch')
parser.add_argument('--threads', type=int, default=6, help='Number of threads for each data loader to use')
parser.add_argument('--nocuda', action='store_true', help='If true, use CPU only. Else use GPU.')
# os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
opt = parser.parse_args()
print(opt)
configfile = opt.config_path
assert os.path.isfile(configfile)
config = configparser.ConfigParser()
config.read(configfile)
cuda = not opt.nocuda
if cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run with --nocuda")
device = torch.device("cuda" if cuda else "cpu")
random.seed(int(config['train']['seed']))
np.random.seed(int(config['train']['seed']))
torch.manual_seed(int(config['train']['seed']))
if cuda:
# noinspection PyUnresolvedReferences
torch.cuda.manual_seed(int(config['train']['seed']))
optimizer = None
scheduler = None
print('===> Building model')
encoder_dim, encoder = get_backend()
if opt.resume_path: # if already started training earlier and continuing
if isfile(opt.resume_path):
print("=> loading checkpoint '{}'".format(opt.resume_path))
checkpoint = torch.load(opt.resume_path, map_location=lambda storage, loc: storage)
config['global_params']['num_clusters'] = str(checkpoint['state_dict']['pool.centroids'].shape[0])
model = get_model(encoder, encoder_dim, opt, config['global_params'], append_pca_layer=False)
model.load_state_dict(checkpoint['state_dict'])
opt.start_epoch = checkpoint['epoch']
print("=> loaded checkpoint '{}'".format(opt.resume_path, ))
else:
raise FileNotFoundError("=> no checkpoint found at '{}'".format(opt.resume_path))
else: # if not, assume fresh training instance and will initially generate cluster centroids
print('===> Loading model')
config['global_params']['num_clusters'] = config['train']['num_clusters']
model = get_model(encoder, encoder_dim, opt, config['global_params'], append_pca_layer=False)
initcache = join(opt.cache_path, 'centroids', 'vgg16_' + 'mapillary_' + config['train'][
'num_clusters'] + '_desc_cen.hdf5')
if opt.cluster_path:
if isfile(opt.cluster_path):
if opt.cluster_path != initcache:
shutil.copyfile(opt.cluster_path, initcache)
else:
raise FileNotFoundError("=> no cluster data found at '{}'".format(opt.cluster_path))
else:
print('===> Finding cluster centroids')
print('===> Loading dataset(s) for clustering')
train_dataset = MSLS(opt.dataset_root_dir, mode='test', cities='train', transform=input_transform(),
bs=int(config['train']['cachebatchsize']), threads=opt.threads,
margin=float(config['train']['margin']))
model = model.to(device)
print('===> Calculating descriptors and clusters')
get_clusters(train_dataset, model, encoder_dim, device, opt, config)
            # copy the file
shutil.copyfile(initcache, initcache + '.bk')
os.remove(initcache)
os.rename(initcache + '.bk', initcache)
# a little hacky, but needed to easily run init_params
model = model.to(device="cpu")
with h5py.File(initcache, mode='r') as h5:
clsts = h5.get("centroids")[...]
traindescs = h5.get("descriptors")[...]
model.pool.init_params(clsts, traindescs)
del clsts, traindescs
isParallel = False
if int(config['global_params']['nGPU']) > 1 and torch.cuda.device_count() > 1:
model.encoder = nn.DataParallel(model.encoder)
model.pool = nn.DataParallel(model.pool)
isParallel = True
if config['train']['optim'] == 'ADAM':
optimizer = optim.Adam(filter(lambda par: par.requires_grad,
model.parameters()), lr=float(config['train']['lr'])) # , betas=(0,0.9))
elif config['train']['optim'] == 'SGD':
optimizer = optim.SGD(filter(lambda par: par.requires_grad,
model.parameters()), lr=float(config['train']['lr']),
momentum=float(config['train']['momentum']),
weight_decay=float(config['train']['weightDecay']))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=int(config['train']['lrstep']),
gamma=float(config['train']['lrgamma']))
else:
raise ValueError('Unknown optimizer: ' + config['train']['optim'])
criterion = nn.TripletMarginLoss(margin=float(config['train']['margin']) ** 0.5, p=2, reduction='sum').to(device)
model = model.to(device)
if opt.resume_path:
optimizer.load_state_dict(checkpoint['optimizer'])
print('===> Loading dataset(s)')
    exclude_panos_training = not config['train'].getboolean('includepanos')
train_dataset = MSLS(opt.dataset_root_dir, mode='train', nNeg=int(config['train']['nNeg']), transform=input_transform(),
bs=int(config['train']['cachebatchsize']), threads=opt.threads, margin=float(config['train']['margin']),
                         exclude_panos=exclude_panos_training)
validation_dataset = MSLS(opt.dataset_root_dir, mode='val', transform=input_transform(),
bs=int(config['train']['cachebatchsize']), threads=opt.threads,
margin=float(config['train']['margin']), posDistThr=25)
print('===> Training query set:', len(train_dataset.qIdx))
print('===> Evaluating on val set, query count:', len(validation_dataset.qIdx))
print('===> Training model')
writer = SummaryWriter(
log_dir=join(opt.save_path, datetime.now().strftime('%b%d_%H-%M-%S') + '_' + opt.identifier))
# write checkpoints in logdir
logdir = writer.file_writer.get_logdir()
opt.save_file_path = join(logdir, 'checkpoints')
makedirs(opt.save_file_path)
not_improved = 0
best_score = 0
if opt.resume_path:
not_improved = checkpoint['not_improved']
best_score = checkpoint['best_score']
for epoch in trange(opt.start_epoch + 1, opt.nEpochs + 1, desc='Epoch number'.rjust(15), position=0):
train_epoch(train_dataset, model, optimizer, criterion, encoder_dim, device, epoch, opt, config, writer)
if scheduler is not None:
scheduler.step(epoch)
if (epoch % int(config['train']['evalevery'])) == 0:
recalls = val(validation_dataset, model, encoder_dim, device, opt, config, writer, epoch,
write_tboard=True, pbar_position=1)
is_best = recalls[5] > best_score
if is_best:
not_improved = 0
best_score = recalls[5]
else:
not_improved += 1
save_checkpoint({
'epoch': epoch,
'state_dict': model.state_dict(),
'recalls': recalls,
'best_score': best_score,
'not_improved': not_improved,
'optimizer': optimizer.state_dict(),
'parallel': isParallel,
}, opt, is_best)
if int(config['train']['patience']) > 0 and not_improved > (int(config['train']['patience']) / int(config['train']['evalevery'])):
print('Performance did not improve for', config['train']['patience'], 'epochs. Stopping.')
break
print("=> Best Recall@5: {:.4f}".format(best_score), flush=True)
writer.close()
torch.cuda.empty_cache() # garbage clean GPU memory, a bug can occur when Pytorch doesn't automatically clear the
# memory after runs
print('Done')
| 0
| 0
| 0
|
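The training script above resumes from a checkpoint dictionary holding the epoch, model weights and optimizer state. A self-contained sketch of that round-trip with a toy model and a temporary path (only a subset of the fields save_checkpoint() persists is shown; the path and model are illustrative):
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# save the fields needed to resume
state = {"epoch": 3, "state_dict": model.state_dict(), "optimizer": optimizer.state_dict()}
torch.save(state, "/tmp/toy_checkpoint.pth")

# restore weights and optimizer state, then continue from the stored epoch
checkpoint = torch.load("/tmp/toy_checkpoint.pth", map_location="cpu")
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
start_epoch = checkpoint["epoch"]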
428ecb59ce2f80850126f403f67d8d5fd1d223cb
| 133
|
py
|
Python
|
output/models/sun_data/attr_decl/ad_name/ad_name00104m/ad_name00104m5_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/sun_data/attr_decl/ad_name/ad_name00104m/ad_name00104m5_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/sun_data/attr_decl/ad_name/ad_name00104m/ad_name00104m5_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from output.models.sun_data.attr_decl.ad_name.ad_name00104m.ad_name00104m5_xsd.ad_name00104m5 import Root
__all__ = [
"Root",
]
| 22.166667
| 105
| 0.796992
|
from output.models.sun_data.attr_decl.ad_name.ad_name00104m.ad_name00104m5_xsd.ad_name00104m5 import Root
__all__ = [
"Root",
]
| 0
| 0
| 0
|
2299642d6845e185f37f34afaad6bf76d5e12526
| 4,075
|
py
|
Python
|
generated-libraries/python/netapp/aggr/filter_attrs_info.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/netapp/aggr/filter_attrs_info.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/netapp/aggr/filter_attrs_info.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
from netapp.netapp_object import NetAppObject
class FilterAttrsInfo(NetAppObject):
"""
List of filters based on attributes of an
aggregate.
"""
_is_local = None
@property
def is_local(self):
"""
If true, returns aggregates owned by the local
node. It includes taken over aggregates with
'sfo' policy. Default is false.
"""
return self._is_local
_all = None
@property
def all(self):
"""
If true, returns all aggregates owned by the
local node and also taken over by the local
node. Default is false.
"""
return self._all
_is_sfo = None
@property
def is_sfo(self):
"""
If true, returns aggregates with 'sfo' HA
policy which includes local and taken over
aggregates. Default is false.
"""
return self._is_sfo
_is_dr_auxiliary = None
@property
def is_dr_auxiliary(self):
"""
If true, returns aggregates taken over by the
local node but owned by the partner of DR (disaster
recovery) partner (i.e. auxiliary partner).
Default is false.
"""
return self._is_dr_auxiliary
_is_partner = None
@property
def is_partner(self):
"""
If true, returns aggregates taken over by the
local node but owned by the partner. Default
is false.
"""
return self._is_partner
_is_cfo = None
@property
def is_cfo(self):
"""
If true, returns aggregates with 'cfo' HA
policy which includes local and taken over
aggregates. Default is false.
"""
return self._is_cfo
_is_dr_partner = None
@property
def is_dr_partner(self):
"""
If true, returns aggregates taken over by the
local node but owned by the DR (disaster
recovery) partner. Default is false.
"""
return self._is_dr_partner
| 28.900709
| 91
| 0.554847
|
from netapp.netapp_object import NetAppObject
class FilterAttrsInfo(NetAppObject):
"""
List of filters based on attributes of an
aggregate.
"""
_is_local = None
@property
def is_local(self):
"""
If true, returns aggregates owned by the local
node. It includes taken over aggregates with
'sfo' policy. Default is false.
"""
return self._is_local
@is_local.setter
def is_local(self, val):
if val != None:
self.validate('is_local', val)
self._is_local = val
_all = None
@property
def all(self):
"""
If true, returns all aggregates owned by the
local node and also taken over by the local
node. Default is false.
"""
return self._all
@all.setter
def all(self, val):
if val != None:
self.validate('all', val)
self._all = val
_is_sfo = None
@property
def is_sfo(self):
"""
If true, returns aggregates with 'sfo' HA
policy which includes local and taken over
aggregates. Default is false.
"""
return self._is_sfo
@is_sfo.setter
def is_sfo(self, val):
if val != None:
self.validate('is_sfo', val)
self._is_sfo = val
_is_dr_auxiliary = None
@property
def is_dr_auxiliary(self):
"""
If true, returns aggregates taken over by the
local node but owned by the partner of DR (disaster
recovery) partner (i.e. auxiliary partner).
Default is false.
"""
return self._is_dr_auxiliary
@is_dr_auxiliary.setter
def is_dr_auxiliary(self, val):
if val != None:
self.validate('is_dr_auxiliary', val)
self._is_dr_auxiliary = val
_is_partner = None
@property
def is_partner(self):
"""
If true, returns aggregates taken over by the
local node but owned by the partner. Default
is false.
"""
return self._is_partner
@is_partner.setter
def is_partner(self, val):
if val != None:
self.validate('is_partner', val)
self._is_partner = val
_is_cfo = None
@property
def is_cfo(self):
"""
If true, returns aggregates with 'cfo' HA
policy which includes local and taken over
aggregates. Default is false.
"""
return self._is_cfo
@is_cfo.setter
def is_cfo(self, val):
if val != None:
self.validate('is_cfo', val)
self._is_cfo = val
_is_dr_partner = None
@property
def is_dr_partner(self):
"""
If true, returns aggregates taken over by the
local node but owned by the DR (disaster
recovery) partner. Default is false.
"""
return self._is_dr_partner
@is_dr_partner.setter
def is_dr_partner(self, val):
if val != None:
self.validate('is_dr_partner', val)
self._is_dr_partner = val
@staticmethod
def get_api_name():
return "filter-attrs-info"
@staticmethod
def get_desired_attrs():
return [
'is-local',
'all',
'is-sfo',
'is-dr-auxiliary',
'is-partner',
'is-cfo',
'is-dr-partner',
]
def describe_properties(self):
return {
'is_local': { 'class': bool, 'is_list': False, 'required': 'optional' },
'all': { 'class': bool, 'is_list': False, 'required': 'optional' },
'is_sfo': { 'class': bool, 'is_list': False, 'required': 'optional' },
'is_dr_auxiliary': { 'class': bool, 'is_list': False, 'required': 'optional' },
'is_partner': { 'class': bool, 'is_list': False, 'required': 'optional' },
'is_cfo': { 'class': bool, 'is_list': False, 'required': 'optional' },
'is_dr_partner': { 'class': bool, 'is_list': False, 'required': 'optional' },
}
| 1,582
| 0
| 265
|
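FilterAttrsInfo above follows the property-plus-validating-setter idiom used throughout these generated NetApp classes. A stripped-down sketch of the same idiom; the Flags class and its validate() are illustrative stand-ins, not part of the NetApp library:
class Flags:
    _is_local = None

    def validate(self, name, value):
        # illustrative stand-in for NetAppObject.validate()
        if not isinstance(value, bool):
            raise ValueError("%s must be a bool, got %r" % (name, value))

    @property
    def is_local(self):
        return self._is_local

    @is_local.setter
    def is_local(self, val):
        if val is not None:
            self.validate("is_local", val)
        self._is_local = val

flags = Flags()
flags.is_local = True  # passes validation before being stored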
365e5102b4767a425fd536cf993ba62a742ed401
| 600
|
py
|
Python
|
bdshare/__init__.py
|
rochi88/dshare
|
9dc46baff822be2ae7a7541fa10535a0299fbb5e
|
[
"MIT"
] | 10
|
2020-04-09T06:34:48.000Z
|
2022-02-07T09:39:22.000Z
|
bdshare/__init__.py
|
rochi88/dshare
|
9dc46baff822be2ae7a7541fa10535a0299fbb5e
|
[
"MIT"
] | null | null | null |
bdshare/__init__.py
|
rochi88/dshare
|
9dc46baff822be2ae7a7541fa10535a0299fbb5e
|
[
"MIT"
] | 5
|
2020-08-06T06:54:04.000Z
|
2021-09-06T12:28:23.000Z
|
from ._version import __version__
__author__ = 'Raisul Islam'
"""
for trading data
"""
from bdshare.stock.trading import (get_current_trade_data, get_dsex_data, get_current_trading_code,
get_hist_data, get_basic_hist_data,
get_close_price_data, get_last_trade_price_data, get_cse_current_trade_data)
"""
for trading news
"""
from bdshare.stock.news import (get_agm_news, get_all_news)
"""
for market data
"""
from bdshare.stock.market import (get_market_inf, get_latest_pe, get_market_inf_more_data, get_market_depth_data)
| 27.272727
| 113
| 0.72
|
from ._version import __version__
__author__ = 'Raisul Islam'
"""
for trading data
"""
from bdshare.stock.trading import (get_current_trade_data, get_dsex_data, get_current_trading_code,
get_hist_data, get_basic_hist_data,
get_close_price_data, get_last_trade_price_data, get_cse_current_trade_data)
"""
for trading news
"""
from bdshare.stock.news import (get_agm_news, get_all_news)
"""
for market data
"""
from bdshare.stock.market import (get_market_inf, get_latest_pe, get_market_inf_more_data, get_market_depth_data)
| 0
| 0
| 0
|
c906a64412a8e14097c66c2644b8082b4f66a812
| 8,800
|
py
|
Python
|
tests/io/test_io_base.py
|
culebron/erde
|
9bbaaa1df46629a182c355413a120aa33dc6b377
|
[
"BSD-3-Clause"
] | 16
|
2021-08-24T05:59:04.000Z
|
2021-11-16T12:30:34.000Z
|
tests/io/test_io_base.py
|
culebron/erde
|
9bbaaa1df46629a182c355413a120aa33dc6b377
|
[
"BSD-3-Clause"
] | null | null | null |
tests/io/test_io_base.py
|
culebron/erde
|
9bbaaa1df46629a182c355413a120aa33dc6b377
|
[
"BSD-3-Clause"
] | 2
|
2021-08-30T10:27:13.000Z
|
2021-08-31T09:46:49.000Z
|
from contextlib import contextmanager
from erde.io.base import BaseReader, BaseWriter, BaseDriver
from shapely.geometry import Point, LineString, Polygon
from time import sleep
from unittest import mock
from unittest.mock import patch, Mock
import geopandas as gpd
import pytest
import sys
import traceback as tb
d = 'tests/io/data/'
polygons = gpd.read_file(d + 'polygons.gpkg', driver='GPKG')
out_data = []
runtime_error_arg = "let's fail"
from queue import Queue
| 29.23588
| 164
| 0.731932
|
from contextlib import contextmanager
from erde.io.base import BaseReader, BaseWriter, BaseDriver
from shapely.geometry import Point, LineString, Polygon
from time import sleep
from unittest import mock
from unittest.mock import patch, Mock
import geopandas as gpd
import pytest
import sys
import traceback as tb
d = 'tests/io/data/'
polygons = gpd.read_file(d + 'polygons.gpkg', driver='GPKG')
def test_base_reader():
# (self, source, geometry_filter=None, chunk_size: int = 10_000, sync: bool = False, pbar: bool = True, queue_size=10, **kwargs)
df = polygons.copy()
s = 10
dfs = [df[i:i + s] for i in range(0, len(df), s)]
gen_obj = (i for i in df.geometry.values)
# calling BaseReader, with path to file as geometry_filter.
with patch('erde.read_stream', return_value=dfs) as mock:
BaseReader('mock source', 'path_to_geofile.gpkg')
mock.assert_called_with('path_to_geofile.gpkg', chunk_size=1, pbar=False, sync=True)
# cover the case where geometry_filter is None
with patch('erde.read_stream', return_value=dfs) as mock:
br = BaseReader('another mock')
mock.assert_not_called()
# now, br is used as a filter for other readers
br.total_rows = 10
# trying other objects as geometry_filter:
# single geometries; another BaseReader; generator; geoseries; geodataframe; list of geometries
for obj in [Point(1, 2), LineString([[1, 2], [3, 4]]), Polygon([[0, 1], [1, 1], [1, 0], [0, 0], [0, 1]], []), br, gen_obj, df.geometry, df, df.geometry.to_list()]:
BaseReader('mock source', geometry_filter=obj)
# geometry_filter can't be anything other than those above
with pytest.raises(TypeError):
BaseReader('mock source', Mock)
def test_raise_notimplemented():
# basereader must raise NotImplementedError when we try reading chunks of data
with BaseReader(d + 'polygons.gpkg', chunk_size=3, sync=False) as rd:
itr = iter(rd)
with pytest.raises(NotImplementedError):
next(itr)
with pytest.raises(NotImplementedError):
itr._read_sync()
with pytest.raises(NotImplementedError):
itr.stats('test')
# base driver has read_df and write_df methods
with pytest.raises(NotImplementedError):
BaseDriver.read_df('test')
with pytest.raises(NotImplementedError):
BaseDriver.write_df('test')
with BaseWriter('/tmp/not-implemented-writer.gpkg', sync=False) as wr:
for method in ('_write_sync', '_open_handler', '_cancel', '_close_handler'):
with pytest.raises(NotImplementedError):
getattr(wr, method)()
def test_parallel_coverage():
# just coverage
for s in (False, True):
with BaseReader(d + 'polygons.gpkg', chunk_size=3, sync=s) as rd:
pass
with pytest.raises(RuntimeError):
with BaseReader(d + 'polygons.gpkg', chunk_size=3) as rd:
# call __exit__ with runtime error
rd.out_q.put('a data object to keep the queue busy')
rd.out_q.put('another data object')
rd.background_process.start()
sleep(1)
raise RuntimeError
assert rd.out_q.empty()
assert not rd.background_process.is_alive()
def test_read_parallel():
# 1. should yield what was in the q and exit normally
# 2. when we raise emergency stop, it should break and raise the error from err_q
@contextmanager
def _setup():
with BaseReader(d + 'polygons.gpkg', chunk_size=3) as br:
# entered context but did not start process yet
assert not br.background_process.is_alive()
br.out_q.put(df)
br.out_q.put(df)
br.out_q.put(None)
yield br
df = polygons.copy()
with _setup() as br:
ret_data = list(br._read_parallel())
assert len(ret_data) == 2
with _setup() as br:
# generator will reach yield statement before we get any value, and will yield exactly one df
gen = br._read_parallel()
# pretending we had an exception
e = RuntimeError('arbitrary exception')
br.emergency_stop.value = True
br.err_q.put((e.__class__, e.args, None))
# the generator will run till the next yield, then our code is supposed to get the dataframe
with pytest.raises(RuntimeError):
df = next(gen)
def make_chunks():
df = polygons.copy()
return [df[i:i+2] for i in range(0, 6, 2)]
def test_read_worker():
# _worker should put all dfs yielded by self._read_sync() into que
from unittest import mock
@contextmanager
def _setup():
from queue import Queue
qq = Queue()
with BaseReader(d + 'polygons.gpkg', chunk_size=3) as br:
with mock.patch.object(br, '_read_sync', return_value=dfgen()) as rs, mock.patch.object(br, 'out_q', qq):
yield br, rs
orig_data = make_chunks()
def dfgen():
for i in orig_data:
yield i
# just a normal read, make sure it's called once
with _setup() as (br, rs):
br._worker()
rs.assert_called_once()
out_data = []
sleep(0)
while not br.out_q.empty():
out_data.append(br.out_q.get())
sleep(0) # otherwise que won't work
assert len(out_data) == len(orig_data) + 1
for a, b in zip(out_data, orig_data):
assert a.equals(b)
assert out_data[-1] is None
with _setup() as (br, rs):
e = RuntimeError('arbitrary exception')
br.emergency_stop.value = True
br.err_q.put((e.__class__, e.args, None))
br._worker()
sleep(0)
assert br.out_q.empty()
out_data = []
def _pretend_to_write(self, df):
# this function is called from worker and does nothing
out_data.append(df)
runtime_error_arg = "let's fail"
def _pretend_to_crash(self, df):
# _worker should process the exception
raise RuntimeError(runtime_error_arg)
def _pretend_keyboard_interrupt(self, df):
raise KeyboardInterrupt()
from queue import Queue
def _setup_writer_q():
q = Queue(maxsize=100)
in_data = make_chunks()
for df in in_data:
q.put(df)
q.put(None)
return in_data, q
@contextmanager
def patch_base_writer(**kwargs):
with mock.patch.multiple(BaseWriter, _close_handler=mock.MagicMock(return_value=None), _cancel=mock.MagicMock(return_value=None), **kwargs):
with BaseWriter('/tmp/test.gpkg', sync=True) as bw:
in_data, in_q = _setup_writer_q()
bw.in_q = in_q
bw.err_q = Queue()
bw._worker()
yield bw, in_data
def test_write_worker_ok():
with patch_base_writer(_write_sync=_pretend_to_write) as (bw, in_data):
# here we can't test that _close_handler is called only once -- it's been called twice, by _worker and __exit__
# because we assume sync mode (_worker not launched and __exit__ does cleanup), but then call _worker anyway
BaseWriter._close_handler.assert_called()
BaseWriter._cancel.assert_not_called()
for i, j in zip(in_data, out_data):
assert i.equals(j)
def test_write_worker_crash():
with patch_base_writer(_write_sync=_pretend_to_crash) as (bw, in_data):
# _cancel is called twice, by _worker and __exit__, because we pretended to run in sync mode, but then called _worker anyway, which calls _cancel.
# for the same reason, _close_handler is called by __exit__, not by _worker (who caught the exception)
BaseWriter._close_handler.assert_called_once()
BaseWriter._cancel.assert_called()
def test_write_worker_keyboard_interrupt():
with patch_base_writer(_write_sync=_pretend_keyboard_interrupt) as (bw, in_data):
BaseWriter._close_handler.assert_called_once()
BaseWriter._cancel.assert_called()
def test_write_worker_stops():
with patch_base_writer(_write_sync=_pretend_to_write) as (bw, in_data):
bw(polygons)
bw.emergency_stop.value = True
bw.background_process = Mock(join=Mock())
msg = '__call__ should raise this'
e = RuntimeError(msg)
bw.err_q.put((e.__class__, e.args, ''.join(tb.format_tb(sys.exc_info()[2]))))
bw._sync = False # pretending we're in async mode
with pytest.raises(RuntimeError):
bw(polygons)
bw.background_process.join.assert_called_once()
def test_background_process_start():
from erde import read_stream
with mock.patch('erde.io.base.Process', mock.MagicMock()), mock.patch('erde.io.base.Queue', mock.MagicMock()):
rd = read_stream(d + 'points.csv')
assert rd.background_process is None
with rd as rd1:
assert rd.background_process is not None
rd.background_process.start.assert_not_called()
next(rd1)
rd.background_process.start.assert_called_once()
rd2 = read_stream(d + 'points.csv')
next(rd2)
assert rd2.background_process is None
def test_cant_open_return_false():
# base driver itself has no path_regexp
assert not BaseDriver.can_open('some_path')
with mock.patch('erde.io.base.BaseDriver.path_regexp', r'^not_matching$'):
assert not BaseDriver.can_open('some_path')
assert BaseDriver.can_open('not_matching')
def test_default_chunk_size():
# should be 10_000
from shapely.geometry import Point
from erde import write_df, read_stream
df = gpd.GeoDataFrame([{'x': x, 'y': y, 'geometry': Point(x, y)} for x in range(-179, 0) for y in range(-89, 0)])
assert len(df) > 10_000
p = '/tmp/32krows.csv'
write_df(df, p)
rd = read_stream(p)
df = next(rd)
assert len(df) == 10_000
| 7,889
| 0
| 410
|
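The erde test module above builds contextmanager-based fixtures around mock.patch.object to swap reader and writer internals. A self-contained sketch of that pattern on an illustrative class (Counter and _setup here are not part of erde):
from contextlib import contextmanager
from unittest import mock


class Counter:
    def read(self):
        return 1


@contextmanager
def _setup():
    counter = Counter()
    with mock.patch.object(counter, "read", return_value=42) as patched:
        yield counter, patched


def test_patched_read():
    with _setup() as (counter, patched):
        assert counter.read() == 42
        patched.assert_called_once()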
db4d07d0bd1c5381b9d97e2518fec054d7c83526
| 641
|
py
|
Python
|
bildungslogin-plugin/tests/unittests/test_models.py
|
univention/bildungslogin
|
29bebe858a5445dd5566aad594b33b9dd716eca4
|
[
"MIT"
] | null | null | null |
bildungslogin-plugin/tests/unittests/test_models.py
|
univention/bildungslogin
|
29bebe858a5445dd5566aad594b33b9dd716eca4
|
[
"MIT"
] | null | null | null |
bildungslogin-plugin/tests/unittests/test_models.py
|
univention/bildungslogin
|
29bebe858a5445dd5566aad594b33b9dd716eca4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
from bildungslogin_plugin.models import User
@pytest.mark.parametrize(
"test_data",
(
("id", ""),
("first_name", ""),
("last_name", ""),
("licenses", {""}),
("context", {}),
),
)
| 22.103448
| 65
| 0.616225
|
# -*- coding: utf-8 -*-
import pytest
from bildungslogin_plugin.models import User
def test_user_with_valid_attributes(valid_user_kwargs):
kwargs = valid_user_kwargs()
user = User(**kwargs)
assert user.dict() == kwargs
@pytest.mark.parametrize(
"test_data",
(
("id", ""),
("first_name", ""),
("last_name", ""),
("licenses", {""}),
("context", {}),
),
)
def test_user_attribute_validation(test_data, valid_user_kwargs):
attr, bad_value = test_data
kwargs = valid_user_kwargs()
kwargs[attr] = bad_value
with pytest.raises(ValueError):
User(**kwargs)
| 323
| 0
| 45
|
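The test above drives attribute validation through pytest.mark.parametrize, feeding each field an invalid value. A minimal sketch of the same pattern against an illustrative Item class (not the plugin's User model):
import pytest


class Item:
    def __init__(self, name):
        if not name:
            raise ValueError("name must be non-empty")
        self.name = name


@pytest.mark.parametrize("bad_name", ["", None])
def test_item_rejects_bad_names(bad_name):
    with pytest.raises(ValueError):
        Item(bad_name)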
3d55315ed70523b663daf7176ef0b6850f78bd5a
| 270
|
bzl
|
Python
|
example_http_archive_locked_constrained/upgraded_LOCKFILE.bzl
|
fenollp/bazel_lock
|
bc5f8edeb5523502552c2f11221e2b8b38e92c1a
|
[
"Apache-2.0"
] | null | null | null |
example_http_archive_locked_constrained/upgraded_LOCKFILE.bzl
|
fenollp/bazel_lock
|
bc5f8edeb5523502552c2f11221e2b8b38e92c1a
|
[
"Apache-2.0"
] | null | null | null |
example_http_archive_locked_constrained/upgraded_LOCKFILE.bzl
|
fenollp/bazel_lock
|
bc5f8edeb5523502552c2f11221e2b8b38e92c1a
|
[
"Apache-2.0"
] | null | null | null |
locked = {}
locked["http_archive> tar.gz github.com/bazelbuild/bazel-skylib ~=0.8 "] = {"sha256": "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf043c7ca0", "url": "https://github.com/bazelbuild/bazel-skylib/releases/download/0.9.0/bazel_skylib-0.9.0.tar.gz"}
| 90
| 257
| 0.77037
|
locked = {}
locked["http_archive> tar.gz github.com/bazelbuild/bazel-skylib ~=0.8 "] = {"sha256": "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf043c7ca0", "url": "https://github.com/bazelbuild/bazel-skylib/releases/download/0.9.0/bazel_skylib-0.9.0.tar.gz"}
| 0
| 0
| 0
|
a054bd267afac311974d3a9c46078464614a0c09
| 628
|
py
|
Python
|
p046.py
|
yehnan/project_euler_python
|
9c8a50e992b71c1c313b08a16ea24298ce5cf020
|
[
"MIT"
] | 1
|
2017-03-29T19:30:32.000Z
|
2017-03-29T19:30:32.000Z
|
p046.py
|
yehnan/project_euler_python
|
9c8a50e992b71c1c313b08a16ea24298ce5cf020
|
[
"MIT"
] | null | null | null |
p046.py
|
yehnan/project_euler_python
|
9c8a50e992b71c1c313b08a16ea24298ce5cf020
|
[
"MIT"
] | 1
|
2018-10-29T02:40:06.000Z
|
2018-10-29T02:40:06.000Z
|
# Problem 46: Goldbach's other conjecture
# https://projecteuler.net/problem=46
#
if __name__ == '__main__':
import sys
if len(sys.argv) >= 2 and sys.argv[1] == 'test':
print(test())
else:
print(main())
| 18.470588
| 68
| 0.469745
|
# Problem 46: Goldbach's other conjecture
# https://projecteuler.net/problem=46
def goc():
n = 5
flag = +1
primes = set([2, 3])
while True:
if all(n%p!=0 for p in primes):
primes.add(n)
else:
if not any((n - 2*i*i) in primes for i in range(1, n)):
return n
n += 3-flag
flag = -flag
#
def test():
return 'No test'
def main():
return goc()
if __name__ == '__main__':
import sys
if len(sys.argv) >= 2 and sys.argv[1] == 'test':
print(test())
else:
print(main())
| 305
| 0
| 73
|
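goc() above walks odd numbers with a running prime set and stops at the first odd composite that is not a prime plus twice a square. A brute-force cross-check written independently of that incremental search; the 10_000 search bound is an assumption:
def is_prime(n):
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True


def smallest_counterexample(limit=10_000):
    # smallest odd composite n with no prime p such that n == p + 2*k*k
    for n in range(9, limit, 2):
        if is_prime(n):
            continue
        if not any(is_prime(n - 2 * k * k) for k in range(1, int((n // 2) ** 0.5) + 1)):
            return n
    return None


print(smallest_counterexample())  # Problem 46's well-known answer is 5777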
c7d141b05d1b2740dfa05cc9ef4888a3baa96d01
| 681
|
py
|
Python
|
setup.py
|
zbindenp/ch.zbindenonline.picture
|
780b6099537038c0f54f5401b7388527ab7cad02
|
[
"MIT"
] | null | null | null |
setup.py
|
zbindenp/ch.zbindenonline.picture
|
780b6099537038c0f54f5401b7388527ab7cad02
|
[
"MIT"
] | null | null | null |
setup.py
|
zbindenp/ch.zbindenonline.picture
|
780b6099537038c0f54f5401b7388527ab7cad02
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
try:
long_description = open("README.md").read()
except IOError:
long_description = ""
setup(
name="ch.zbindenonline.picture",
version="0.0.3a",
description="A library to handle pictures",
license="MIT",
author="Patrick Zbinden",
author_email="patrick+dev@zbinden-online.ch",
packages=find_packages(),
install_requires=['Pillow', 'click'],
entry_points={
'console_scripts': [
'crop=ch.zbindenonline.picture.crop:crop'
]
},
long_description=long_description,
classifiers=[
"Programming Language :: Python :: 3",
]
)
| 24.321429
| 53
| 0.637298
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
try:
long_description = open("README.md").read()
except IOError:
long_description = ""
setup(
name="ch.zbindenonline.picture",
version="0.0.3a",
description="A library to handle pictures",
license="MIT",
author="Patrick Zbinden",
author_email="patrick+dev@zbinden-online.ch",
packages=find_packages(),
install_requires=['Pillow', 'click'],
entry_points={
'console_scripts': [
'crop=ch.zbindenonline.picture.crop:crop'
]
},
long_description=long_description,
classifiers=[
"Programming Language :: Python :: 3",
]
)
| 0
| 0
| 0
|
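The entry point above maps a 'crop' console script onto a click command at ch.zbindenonline.picture.crop:crop. A standalone sketch of what such a click command looks like; the argument, option and body are illustrative, not the package's real implementation:
import click


@click.command()
@click.argument("path")
@click.option("--width", default=100, help="Target width in pixels.")
def crop(path, width):
    # pretend to crop the image at PATH
    click.echo("Would crop %s to width %d" % (path, width))


if __name__ == "__main__":
    crop()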
309efa05ea0c3fc33bb125345a4877e75b1e302a
| 11,782
|
py
|
Python
|
tests/graphs/algorithms/test_paths.py
|
ref-humbold/AlgoLib_Python
|
05f725504656ec93b879374a8cd87464d88fff77
|
[
"Apache-2.0"
] | null | null | null |
tests/graphs/algorithms/test_paths.py
|
ref-humbold/AlgoLib_Python
|
05f725504656ec93b879374a8cd87464d88fff77
|
[
"Apache-2.0"
] | null | null | null |
tests/graphs/algorithms/test_paths.py
|
ref-humbold/AlgoLib_Python
|
05f725504656ec93b879374a8cd87464d88fff77
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Tests: Algorithms for shortest paths"""
import unittest
from assertpy import assert_that
from algolib.graphs import DirectedSimpleGraph, UndirectedSimpleGraph
from algolib.graphs.algorithms import Paths, bellman_ford, dijkstra, floyd_warshall
| 56.917874
| 99
| 0.566627
|
# -*- coding: utf-8 -*-
"""Tests: Algorithms for shortest paths"""
import unittest
from assertpy import assert_that
from algolib.graphs import DirectedSimpleGraph, UndirectedSimpleGraph
from algolib.graphs.algorithms import Paths, bellman_ford, dijkstra, floyd_warshall
def _from_list(graph, distances):
return {graph.get_vertex(i): d for i, d in enumerate(distances)}
def _from_matrix(graph, distances):
return {(graph.get_vertex(i), graph.get_vertex(j)): d for i, ds in enumerate(distances)
for j, d in enumerate(ds)}
class PathsTest(unittest.TestCase):
INF = Paths.INFINITY
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._directed_graph = None
self._undirected_graph = None
def setUp(self):
self._directed_graph = DirectedSimpleGraph(range(10))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(0),
self._directed_graph.get_vertex(1), self._Weight(4))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(1),
self._directed_graph.get_vertex(4), self._Weight(7))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(1),
self._directed_graph.get_vertex(7), self._Weight(12))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(2),
self._directed_graph.get_vertex(4), self._Weight(6))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(2),
self._directed_graph.get_vertex(6), self._Weight(8))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(3),
self._directed_graph.get_vertex(0), self._Weight(3))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(3),
self._directed_graph.get_vertex(7), self._Weight(5))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(4),
self._directed_graph.get_vertex(5), self._Weight(1))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(4),
self._directed_graph.get_vertex(3), self._Weight(10))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(5),
self._directed_graph.get_vertex(6), self._Weight(4))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(5),
self._directed_graph.get_vertex(8), self._Weight(2))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(6),
self._directed_graph.get_vertex(5), self._Weight(7))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(7),
self._directed_graph.get_vertex(5), self._Weight(2))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(7),
self._directed_graph.get_vertex(8), self._Weight(6))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(8),
self._directed_graph.get_vertex(9), self._Weight(10))
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(9),
self._directed_graph.get_vertex(6), self._Weight(3))
self._undirected_graph = UndirectedSimpleGraph(range(10))
self._undirected_graph.add_edge_between(self._undirected_graph.get_vertex(0),
self._undirected_graph.get_vertex(1),
self._Weight(4))
self._undirected_graph.add_edge_between(self._undirected_graph.get_vertex(1),
self._undirected_graph.get_vertex(4),
self._Weight(7))
self._undirected_graph.add_edge_between(self._undirected_graph.get_vertex(1),
self._undirected_graph.get_vertex(7),
self._Weight(12))
self._undirected_graph.add_edge_between(self._undirected_graph.get_vertex(2),
self._undirected_graph.get_vertex(6),
self._Weight(8))
self._undirected_graph.add_edge_between(self._undirected_graph.get_vertex(3),
self._undirected_graph.get_vertex(0),
self._Weight(3))
self._undirected_graph.add_edge_between(self._undirected_graph.get_vertex(3),
self._undirected_graph.get_vertex(7),
self._Weight(5))
self._undirected_graph.add_edge_between(self._undirected_graph.get_vertex(4),
self._undirected_graph.get_vertex(5),
self._Weight(1))
self._undirected_graph.add_edge_between(self._undirected_graph.get_vertex(4),
self._undirected_graph.get_vertex(3),
self._Weight(10))
self._undirected_graph.add_edge_between(self._undirected_graph.get_vertex(5),
self._undirected_graph.get_vertex(8),
self._Weight(2))
self._undirected_graph.add_edge_between(self._undirected_graph.get_vertex(7),
self._undirected_graph.get_vertex(5),
self._Weight(2))
self._undirected_graph.add_edge_between(self._undirected_graph.get_vertex(7),
self._undirected_graph.get_vertex(8),
self._Weight(6))
self._undirected_graph.add_edge_between(self._undirected_graph.get_vertex(9),
self._undirected_graph.get_vertex(6),
self._Weight(3))
def test__bellman_ford__when_directed_graph(self):
# given
distances = [20, 0, self.INF, 17, 7, 8, 12, 12, 10, 20]
expected = _from_list(self._directed_graph, distances)
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(2),
self._directed_graph.get_vertex(1), self._Weight(-2))
# when
result = bellman_ford(self._directed_graph, self._directed_graph.get_vertex(1))
# then
assert_that(result).is_equal_to(expected)
def test__bellman_ford__when_undirected_graph(self):
# given
distances = [4, 0, self.INF, 7, 7, 8, self.INF, 10, 10, self.INF]
expected = _from_list(self._undirected_graph, distances)
# when
result = bellman_ford(self._undirected_graph.as_directed(),
self._undirected_graph.get_vertex(1))
# then
assert_that(result).is_equal_to(expected)
def test__bellman_ford__when_negative_cycle__then_value_error(self):
# given
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(8),
self._directed_graph.get_vertex(3),
self._Weight(-20.0))
# when
def function(graph):
return bellman_ford(graph, graph.get_vertex(1))
# then
assert_that(function).raises(ValueError).when_called_with(self._directed_graph)
def test__dijkstra__when_directed_graph(self):
# given
distances = [20, 0, self.INF, 17, 7, 8, 12, 12, 10, 20]
expected = _from_list(self._directed_graph, distances)
# when
result = dijkstra(self._directed_graph, self._directed_graph.get_vertex(1))
# then
assert_that(result).is_equal_to(expected)
def test__dijkstra__when_undirected_graph(self):
# given
distances = [4, 0, self.INF, 7, 7, 8, self.INF, 10, 10, self.INF]
expected = _from_list(self._undirected_graph, distances)
# when
result = dijkstra(self._undirected_graph, self._undirected_graph.get_vertex(1))
# then
assert_that(result).is_equal_to(expected)
def test__dijkstra__when_negative_edge__then_value_error(self):
# given
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(2),
self._directed_graph.get_vertex(1), self._Weight(-2))
# when
def function(graph):
dijkstra(graph, graph.get_vertex(1))
# then
assert_that(function).raises(ValueError).when_called_with(self._directed_graph)
def test__floyd_warshall__when_directed_graph(self):
# given
distances = [[0, 4, self.INF, 21, 11, 12, 16, 16, 14, 24],
[20, 0, self.INF, 17, 7, 8, 12, 12, 10, 20],
[18, -2, 0, 15, 5, 6, 8, 10, 8, 18],
[3, 7, self.INF, 0, 14, 7, 11, 5, 9, 19],
[13, 17, self.INF, 10, 0, 1, 5, 15, 3, 13],
[self.INF, self.INF, self.INF, self.INF, self.INF, 0, 4, self.INF, 2, 12],
[self.INF, self.INF, self.INF, self.INF, self.INF, 7, 0, self.INF, 9, 19],
[self.INF, self.INF, self.INF, self.INF, self.INF, 2, 6, 0, 4, 14],
[self.INF, self.INF, self.INF, self.INF, self.INF, 20, 13, self.INF, 0, 10],
[self.INF, self.INF, self.INF, self.INF, self.INF, 10, 3, self.INF, 12, 0]]
expected = _from_matrix(self._directed_graph, distances)
self._directed_graph.add_edge_between(self._directed_graph.get_vertex(2),
self._directed_graph.get_vertex(1), self._Weight(-2))
# when
result = floyd_warshall(self._directed_graph)
# then
assert_that(result).is_equal_to(expected)
def test__floyd_warshall__when_undirected_graph(self):
# given
distances = \
[[0, 4, self.INF, 3, 11, 10, self.INF, 8, 12, self.INF],
[4, 0, self.INF, 7, 7, 8, self.INF, 10, 10, self.INF],
[self.INF, self.INF, 0, self.INF, self.INF, self.INF, 8, self.INF, self.INF, 11],
[3, 7, self.INF, 0, 8, 7, self.INF, 5, 9, self.INF],
[11, 7, self.INF, 8, 0, 1, self.INF, 3, 3, self.INF],
[10, 8, self.INF, 7, 1, 0, self.INF, 2, 2, self.INF],
[self.INF, self.INF, 8, self.INF, self.INF, self.INF, 0, self.INF, self.INF, 3],
[8, 10, self.INF, 5, 3, 2, self.INF, 0, 4, self.INF],
[12, 10, self.INF, 9, 3, 2, self.INF, 4, 0, self.INF],
[self.INF, self.INF, 11, self.INF, self.INF, self.INF, 3, self.INF, self.INF, 0]]
expected = _from_matrix(self._undirected_graph, distances)
# when
result = floyd_warshall(self._undirected_graph.as_directed())
# then
assert_that(result).is_equal_to(expected)
class _Weight:
def __init__(self, weight):
self.weight = weight
| 11,079
| 359
| 69
|
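The suite above checks dijkstra, bellman_ford and floyd_warshall from algolib against hand-computed distances. A self-contained Dijkstra sketch over a small adjacency-list graph; the graph literal mirrors a few edges of the directed test fixture but is illustrative, not the library's API:
import heapq


def dijkstra(graph, source):
    # graph: {vertex: [(neighbour, weight), ...]} with non-negative weights
    dist = {v: float("inf") for v in graph}
    dist[source] = 0
    heap = [(0, source)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist[u]:
            continue
        for v, w in graph[u]:
            if d + w < dist[v]:
                dist[v] = d + w
                heapq.heappush(heap, (dist[v], v))
    return dist


graph = {0: [(1, 4)], 1: [(4, 7), (7, 12)], 4: [(5, 1)], 5: [], 7: [(5, 2)]}
print(dijkstra(graph, 0))  # {0: 0, 1: 4, 4: 11, 5: 12, 7: 16}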
0dd67ee07bafa26ffbfa3c77b3c517631eb548ca
| 6,441
|
py
|
Python
|
moler/cmd/unix/ip_link.py
|
jochenparm/moler
|
0253d677e0ef150206758c7991197ba5687d0965
|
[
"BSD-3-Clause"
] | 57
|
2018-02-20T08:16:47.000Z
|
2022-03-28T10:36:57.000Z
|
moler/cmd/unix/ip_link.py
|
jochenparm/moler
|
0253d677e0ef150206758c7991197ba5687d0965
|
[
"BSD-3-Clause"
] | 377
|
2018-07-19T11:56:27.000Z
|
2021-07-09T13:08:12.000Z
|
moler/cmd/unix/ip_link.py
|
jochenparm/moler
|
0253d677e0ef150206758c7991197ba5687d0965
|
[
"BSD-3-Clause"
] | 24
|
2018-04-14T20:49:40.000Z
|
2022-03-29T10:44:26.000Z
|
# -*- coding: utf-8 -*-
"""
Ip link command module.
"""
__author__ = 'Julia Patacz'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = 'julia.patacz@nokia.com'
import re
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import ParsingDone
COMMAND_OUTPUT = """
host:~ # ip link show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 08:00:27:05:ae:4d brd ff:ff:ff:ff:ff:ff
host:~ # """
COMMAND_KWARGS = {'action': 'show'}
COMMAND_RESULT = {
'1': {'group': 'default',
'interface': 'lo',
'mode': 'DEFAULT',
'mtu': '65536',
'qdisc': 'noqueue',
'state': 'UNKNOWN',
'transmission': '<LOOPBACK,UP,LOWER_UP>',
'link/loopback': '00:00:00:00:00:00',
'brd': '00:00:00:00:00:00',
},
'2': {'group': 'default',
'interface': 'eth0',
'mode': 'DEFAULT',
'mtu': '1500',
'qdisc': 'pfifo_fast',
'state': 'UP',
'transmission': '<BROADCAST,MULTICAST,UP,LOWER_UP>',
'qlen': '1000',
'link/ether': '08:00:27:05:ae:4d',
'brd': 'ff:ff:ff:ff:ff:ff',
}
}
| 43.52027
| 201
| 0.620866
|
# -*- coding: utf-8 -*-
"""
Ip link command module.
"""
__author__ = 'Julia Patacz'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = 'julia.patacz@nokia.com'
import re
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import ParsingDone
class IpLink(GenericUnixCommand):
def __init__(self, connection, action, prompt=None, newline_chars=None, options=None, runner=None):
super(IpLink, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
self.ret_required = False
self.action = action
self.options = options
self.line = False
def build_command_string(self):
cmd = "ip link {}".format(self.action)
if self.options:
cmd = "{} {}".format(cmd, self.options)
return cmd
def on_new_line(self, line, is_full_line):
if is_full_line:
try:
self._parse_line_brd(line)
self._parse_line_int_trans_mtu_qdisc_state_mode_group_qlen(line)
self._parse_line_int_trans_mtu_qdisc_state_mode_group(line)
except ParsingDone:
pass
return super(IpLink, self).on_new_line(line, is_full_line)
# link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
_re_link_brd = re.compile(
r"(?P<LINK>\S+)\s+(?P<VAL>\S+:\S+:\S+:\S+:\S+:\S+)\s+(?P<KEY>\S+)\s+(?P<VAL_2>\S+:\S+:\S+:\S+:\S+:\S+)")
def _parse_line_brd(self, line):
if self.line and self._regex_helper.search_compiled(IpLink._re_link_brd, line):
temp_link = self._regex_helper.group('LINK')
temp_val = self._regex_helper.group('VAL')
temp_brd = self._regex_helper.group('KEY')
temp_val_2 = self._regex_helper.group('VAL_2')
self.current_ret[self.line][temp_link] = temp_val
self.current_ret[self.line][temp_brd] = temp_val_2
self.line = False
raise ParsingDone
# 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
_re_line_int_trans_mtu_qdisc_state_mode_group_qlen = re.compile(
r"(?P<LINE>\d+):\s+(?P<INT>\S+):\s+(?P<TRANS>\S+)\s+mtu\s+(?P<MTU>\S+)\s+qdisc\s+(?P<QDISC>\S+)\s+state\s+(?P<STATE>\S+)\s+mode\s+(?P<MODE>\S+)\s+group\s+(?P<GROUP>\S+)\s+qlen\s+(?P<QLEN>\S+)")
def _parse_line_int_trans_mtu_qdisc_state_mode_group_qlen(self, line):
if self._regex_helper.search_compiled(IpLink._re_line_int_trans_mtu_qdisc_state_mode_group_qlen, line):
temp_line = self._regex_helper.group('LINE')
temp_int = self._regex_helper.group('INT')
temp_trans = self._regex_helper.group('TRANS')
temp_mtu = self._regex_helper.group('MTU')
temp_qdisc = self._regex_helper.group('QDISC')
temp_state = self._regex_helper.group('STATE')
temp_mode = self._regex_helper.group('MODE')
temp_group = self._regex_helper.group('GROUP')
temp_qlen = self._regex_helper.group('QLEN')
if temp_line not in self.current_ret.keys():
self.current_ret[temp_line] = {}
self.current_ret[temp_line]['interface'] = temp_int
self.current_ret[temp_line]['transmission'] = temp_trans
self.current_ret[temp_line]['mtu'] = temp_mtu
self.current_ret[temp_line]['qdisc'] = temp_qdisc
self.current_ret[temp_line]['state'] = temp_state
self.current_ret[temp_line]['mode'] = temp_mode
self.current_ret[temp_line]['group'] = temp_group
self.current_ret[temp_line]['qlen'] = temp_qlen
self.line = temp_line
raise ParsingDone
# 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default
_re_line_int_trans_mtu_qdisc_state_mode_group = re.compile(
r"(?P<LINE>\d+):\s+(?P<INT>\S+):\s+(?P<TRANS>\S+)\s+mtu\s+(?P<MTU>\S+)\s+qdisc\s+(?P<QDISC>\S+)\s+state\s+(?P<STATE>\S+)\s+mode\s+(?P<MODE>\S+)\s+group\s+(?P<GROUP>\S+)")
def _parse_line_int_trans_mtu_qdisc_state_mode_group(self, line):
if self._regex_helper.search_compiled(IpLink._re_line_int_trans_mtu_qdisc_state_mode_group, line):
temp_line = self._regex_helper.group('LINE')
temp_int = self._regex_helper.group('INT')
temp_trans = self._regex_helper.group('TRANS')
temp_mtu = self._regex_helper.group('MTU')
temp_qdisc = self._regex_helper.group('QDISC')
temp_state = self._regex_helper.group('STATE')
temp_mode = self._regex_helper.group('MODE')
temp_group = self._regex_helper.group('GROUP')
if temp_line not in self.current_ret.keys():
self.current_ret[temp_line] = {}
self.current_ret[temp_line]['interface'] = temp_int
self.current_ret[temp_line]['transmission'] = temp_trans
self.current_ret[temp_line]['mtu'] = temp_mtu
self.current_ret[temp_line]['qdisc'] = temp_qdisc
self.current_ret[temp_line]['state'] = temp_state
self.current_ret[temp_line]['mode'] = temp_mode
self.current_ret[temp_line]['group'] = temp_group
self.line = temp_line
COMMAND_OUTPUT = """
host:~ # ip link show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 08:00:27:05:ae:4d brd ff:ff:ff:ff:ff:ff
host:~ # """
COMMAND_KWARGS = {'action': 'show'}
COMMAND_RESULT = {
'1': {'group': 'default',
'interface': 'lo',
'mode': 'DEFAULT',
'mtu': '65536',
'qdisc': 'noqueue',
'state': 'UNKNOWN',
'transmission': '<LOOPBACK,UP,LOWER_UP>',
'link/loopback': '00:00:00:00:00:00',
'brd': '00:00:00:00:00:00',
},
'2': {'group': 'default',
'interface': 'eth0',
'mode': 'DEFAULT',
'mtu': '1500',
'qdisc': 'pfifo_fast',
'state': 'UP',
'transmission': '<BROADCAST,MULTICAST,UP,LOWER_UP>',
'qlen': '1000',
'link/ether': '08:00:27:05:ae:4d',
'brd': 'ff:ff:ff:ff:ff:ff',
}
}
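# --- Illustrative, standalone check (not part of the original module) ---
# A minimal sketch of what the qlen regex above captures for the eth0 line
# from COMMAND_OUTPUT; it only exercises the compiled class attribute, so no
# moler runtime or connection is needed.
if __name__ == '__main__':
    _sample = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast "
               "state UP mode DEFAULT group default qlen 1000")
    _match = IpLink._re_line_int_trans_mtu_qdisc_state_mode_group_qlen.search(_sample)
    # Expected output: eth0 1500 UP 1000
    print(_match.group('INT'), _match.group('MTU'), _match.group('STATE'), _match.group('QLEN'))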
| 3,904
| 1,116
| 23
|
6f494cab63080c693b2c3527d3e1b6dabae2dbb0
| 2,718
|
py
|
Python
|
paddle_ds/extras/srt_to_textfile.py
|
dalonlobo/asr
|
56d08017a69dd2c80a03310cbf2e3690ca287b96
|
[
"MIT"
] | null | null | null |
paddle_ds/extras/srt_to_textfile.py
|
dalonlobo/asr
|
56d08017a69dd2c80a03310cbf2e3690ca287b96
|
[
"MIT"
] | 1
|
2021-06-01T21:56:54.000Z
|
2021-06-01T21:56:54.000Z
|
paddle_ds/extras/srt_to_textfile.py
|
dalonlobo/asr
|
56d08017a69dd2c80a03310cbf2e3690ca287b96
|
[
"MIT"
] | 1
|
2019-02-15T12:08:06.000Z
|
2019-02-15T12:08:06.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 17 15:06:25 2018
@author: dalonlobo
"""
from __future__ import absolute_import, division, \
print_function, unicode_literals
import os
import sys
import argparse
import logging
import pysrt
import glob
from custom_utils import pre_process_srt
logger = logging.getLogger("__main__")
def convert_srt(file_name):
"""Converts one srt to text file"""
abs_path = os.path.dirname(file_name)
op_name = os.path.basename(file_name) + ".ref.txt"
# Read the srt file
subtitles = pysrt.open(file_name)
logger.info("Read the srt file " + file_name)
with open(os.path.join(abs_path,op_name), "w") as f:
for index, subtitle in enumerate(subtitles):
sub = pre_process_srt(subtitle.text)
f.write(sub + " " if sub else "")
logger.info("Done writing to text: " + file_name)
if __name__ == "__main__":
"""
This script will convert all srt files in all subfolders to text files
"""
logs_path = os.path.basename(__file__) + ".logs"
logging.basicConfig(filename=logs_path,
filemode='a',
format='%(asctime)s [%(name)s:%(levelname)s] [%(filename)s:%(funcName)s] #%(lineno)d: %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
print("Logs are in ", os.path.abspath(logs_path), file=sys.stderr)
print("Run the following command to view logs:\n", file=sys.stderr)
print("tail -f {}".format(os.path.abspath(logs_path)), file=sys.stderr)
    parser = argparse.ArgumentParser(description="Convert srt files in all subfolders to text files")
parser.add_argument('--srcpath', type=str,
help='Path to the folder containing srt files')
# Remove the below line
args = parser.parse_args(["--srcpath", "Videos"])
# Uncomment the below line
# args = parser.parse_args()
# Path to the source folder, where srt will be saved
srcpath = os.path.abspath(args.srcpath)
logger.debug("Reading the files: \n")
for dirs in os.listdir(srcpath):
vid_directory = os.path.join(srcpath, dirs)
if not os.path.isdir(vid_directory):
continue # If its not directory, just continue
for file_name in glob.glob(vid_directory + os.path.sep + "*.srt"):
logger.info("Passing: " + file_name)
convert_srt(file_name)
logger.info("All srt files converted")
print("All srt files converted", file=sys.stderr)
logger.info("#########################")
logger.info(".....Exiting program.....")
logger.info("#########################")
print(".....Exiting program.....", file=sys.stderr)
| 37.232877
| 107
| 0.635394
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 17 15:06:25 2018
@author: dalonlobo
"""
from __future__ import absolute_import, division, \
print_function, unicode_literals
import os
import sys
import argparse
import logging
import pysrt
import glob
from custom_utils import pre_process_srt
logger = logging.getLogger("__main__")
def convert_srt(file_name):
"""Converts one srt to text file"""
abs_path = os.path.dirname(file_name)
op_name = os.path.basename(file_name) + ".ref.txt"
# Read the srt file
subtitles = pysrt.open(file_name)
logger.info("Read the srt file " + file_name)
with open(os.path.join(abs_path,op_name), "w") as f:
for index, subtitle in enumerate(subtitles):
sub = pre_process_srt(subtitle.text)
f.write(sub + " " if sub else "")
logger.info("Done writing to text: " + file_name)
if __name__ == "__main__":
"""
This script will convert all srt files in all subfolders to text files
"""
logs_path = os.path.basename(__file__) + ".logs"
logging.basicConfig(filename=logs_path,
filemode='a',
format='%(asctime)s [%(name)s:%(levelname)s] [%(filename)s:%(funcName)s] #%(lineno)d: %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
print("Logs are in ", os.path.abspath(logs_path), file=sys.stderr)
print("Run the following command to view logs:\n", file=sys.stderr)
print("tail -f {}".format(os.path.abspath(logs_path)), file=sys.stderr)
    parser = argparse.ArgumentParser(description="Convert srt files in all subfolders to text files")
parser.add_argument('--srcpath', type=str,
help='Path to the folder containing srt files')
# Remove the below line
args = parser.parse_args(["--srcpath", "Videos"])
# Uncomment the below line
# args = parser.parse_args()
# Path to the source folder, where srt will be saved
srcpath = os.path.abspath(args.srcpath)
logger.debug("Reading the files: \n")
for dirs in os.listdir(srcpath):
vid_directory = os.path.join(srcpath, dirs)
if not os.path.isdir(vid_directory):
continue # If its not directory, just continue
for file_name in glob.glob(vid_directory + os.path.sep + "*.srt"):
logger.info("Passing: " + file_name)
convert_srt(file_name)
logger.info("All srt files converted")
print("All srt files converted", file=sys.stderr)
logger.info("#########################")
logger.info(".....Exiting program.....")
logger.info("#########################")
print(".....Exiting program.....", file=sys.stderr)
| 0
| 0
| 0
|
638f39f37cb1887ac6653a6ebf222892d957f435
| 3,439
|
py
|
Python
|
hydrodynamics/hydrodynamicsbase.py
|
saridut/FloriPy
|
0117d358b9c2362ea32ecf9ec719fdaed87d3e14
|
[
"MIT"
] | null | null | null |
hydrodynamics/hydrodynamicsbase.py
|
saridut/FloriPy
|
0117d358b9c2362ea32ecf9ec719fdaed87d3e14
|
[
"MIT"
] | null | null | null |
hydrodynamics/hydrodynamicsbase.py
|
saridut/FloriPy
|
0117d358b9c2362ea32ecf9ec719fdaed87d3e14
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg as sla
| 29.904348
| 81
| 0.575749
|
#!/usr/bin/env python
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg as sla
class HydrodynamicsBase:
__metaclass__ = ABCMeta
@abstractmethod
def _calc_grm(self):
pass
@abstractmethod
def _calc_gmm(self):
pass
@abstractmethod
def update(self, time):
pass
def grmvec(self, vec, out=None):
'''
Returns the product of the grand resistance matrix with another numpy
vector.
'''
if out is None:
out = np.zeros((self._grm.shape[0],))
        out = np.dot(self._grm, vec, out=out)
return out
def gmmvec(self, vec, out=None):
'''
Returns the product of the grand mobility matrix with another numpy
vector.
'''
if out is None:
out = np.zeros((self._gmm.shape[0],))
out = np.dot(self._gmm, vec, out=out)
return out
def grmvel(self, out=None):
'''
Returns the product of the grand resistance matrix with the fluid
velocity at the given points.
'''
points = np.array(self._model.get_coms())
vec = self._flowfield.get_oue(points, arrangement='interlaced', out=None)
return self.grmvec(vec, out=out)
def gmmvel(self, out=None):
'''
Returns the product of the grand mobility matrix with the fluid
velocity at the given points.
'''
points = np.array(self._model.get_coms())
vec = self._flowfield.get_oue(points, arrangement='interlaced', out=None)
return self.gmmvec(vec, out=out)
def getrans_grm_ge(self, ge, out=None):
'''
Returns the product A'RA, where A is a matrix, R is the resistance
matrix for force and torque, and A' is the transpose of A.
'''
if out is None:
n = ge.shape[1]
out = np.zeros((n,n))
grm = np.zeros((6*self._num_bodies,6*self._num_bodies))
for i in range(self._num_bodies):
grm[6*i:6*i+6, 6*i:6*i+6] = self._grm[6*i:6*i+6, 15*i:15*i+6]
out = np.dot(ge.T, np.dot(grm, ge), out=out)
return out
def getrans_gmm_ge(self, ge, out=None):
'''
Returns the product A'MA, where A is a matrix, M is the mobility
matrix for force and torque, and A' is the transpose of A.
'''
if out is None:
n = ge.shape[1]
out = np.zeros((n,n))
gmm = np.zeros((6*self._num_bodies,6*self._num_bodies))
for i in range(self._num_bodies):
gmm[6*i:6*i+6, 6*i:6*i+6] = self._gmm[6*i:6*i+6, 15*i:15*i+6]
out = np.dot(ge.T, np.dot(gmm, ge), out=out)
return out
def grm_cholesky_decomp(self):
'''
Returns the Cholesky decomposition of the grand resistance matrix.
'''
grm = np.zeros((6*self._num_bodies,6*self._num_bodies))
for i in range(self._num_bodies):
grm[6*i:6*i+6, 6*i:6*i+6] = self._grm[6*i:6*i+6, 15*i:15*i+6]
return sla.cholesky(grm, lower=True)
def gmm_cholesky_decomp(self):
'''
Returns the Cholesky decomposition of the grand mobility matrix.
'''
gmm = np.zeros((6*self._num_bodies,6*self._num_bodies))
for i in range(self._num_bodies):
gmm[6*i:6*i+6, 6*i:6*i+6] = self._gmm[6*i:6*i+6, 15*i:15*i+6]
        return sla.cholesky(gmm, lower=True)
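# --- Illustrative usage sketch (not part of the original module) ---
# The base class expects concrete subclasses to provide `_grm`, `_gmm` and
# `_num_bodies` (and `_model` / `_flowfield` for the *vel helpers, which are
# not exercised here). The toy subclass below uses made-up identity matrices
# purely to show how `grmvec` is meant to be called.
class _ToyHydrodynamics(HydrodynamicsBase):
    def __init__(self):
        self._num_bodies = 1
        self._grm = np.eye(6)   # placeholder grand resistance matrix
        self._gmm = np.eye(6)   # placeholder grand mobility matrix

    def _calc_grm(self):
        pass

    def _calc_gmm(self):
        pass

    def update(self, time):
        pass

if __name__ == '__main__':
    toy = _ToyHydrodynamics()
    force_torque = np.arange(6, dtype=float)
    print(toy.grmvec(force_torque))   # identity resistance -> echoes the input vector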
| 39
| 3,262
| 23
|
932de32de77c81ff44b954e310c1d7815e13ecce
| 16,443
|
py
|
Python
|
modules/FIC_Github.py
|
Akhliskun/firefox-infra-changelog
|
5ca76e0be012acbea29d703c7728ecd92aff6f89
|
[
"MIT"
] | null | null | null |
modules/FIC_Github.py
|
Akhliskun/firefox-infra-changelog
|
5ca76e0be012acbea29d703c7728ecd92aff6f89
|
[
"MIT"
] | 74
|
2018-07-13T00:34:36.000Z
|
2018-12-17T16:23:24.000Z
|
modules/FIC_Github.py
|
Akhliskun/firefox-infra-changelog
|
5ca76e0be012acbea29d703c7728ecd92aff6f89
|
[
"MIT"
] | 7
|
2018-07-14T15:51:17.000Z
|
2018-11-29T01:14:11.000Z
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import github3
from modules.FIC_FileHandler import FICFileHandler
from modules.FIC_DataVault import FICDataVault
from modules.config import *
from modules.FIC_Utilities import return_time
from modules.config import CHANGELOG_JSON_PATH, CHANGELOG_MD_PATH, CHANGELOG_REPO_PATH, INDIVIDUAL_REPO_DAYS
from git import Repo
import os
import json
import requests
import re
class FICGithub(FICFileHandler, FICDataVault):
"""
    This class handles extracting, filtering and writing data into JSON files for git repos
    that affect Firefox infra
"""
def read_repo(self):
"""
        It calls the method that gets all the repository data, using its name, team and the authentication credential
as parameters.
"""
return self._init_github(self._gh, self.team_name, self.repo_name)
def limit_checker(self):
"""Checks for the amount of limit requests remaining on a specific token.
Switches to a new token from a group of defined tokens upon reaching 5 requests remaining
"""
limit_requests = self._gh.ratelimit_remaining
self.LOGGER.info(f"The number of limit requests is: {limit_requests}")
if limit_requests < 5 and len(GIT_TOKEN) > 1:
# switch token
if self._switch_token():
return True
else:
# check if the rate limit was reset for the second use of a token
if limit_requests < 5:
self._get_reset_time()
return False
else:
return True
# check the reset time in case of a single token
elif limit_requests < 5:
self._get_reset_time()
return False
# return True in case of limit request not reached
else:
return True
def _switch_token(self):
"""
Method that switches git tokens
"""
# get next token
switch = self._get_token()
# re-logging with the new token
self._token = os.environ.get(GIT_TOKEN[self.token_counter])
self._gh = self._auth()
self.LOGGER.info("The token was changed.")
return switch
def _get_token(self):
"""
Returns the next token from the list of defined tokens
"""
# in case of the next token but not the last
if self.token_counter < len(GIT_TOKEN) - 1:
self.token_counter += 1
self.LOGGER.info(f"Changing token with: {GIT_TOKEN[self.token_counter]}")
return True
# in case of the last token
elif self.token_counter == len(GIT_TOKEN) - 1:
self.token_counter = 0
self.LOGGER.info(f"Changing token with: {GIT_TOKEN[self.token_counter]}")
return False
def pull(self):
"""
Pulls changes for a git repo
:return: the pulled changes
"""
self.LOGGER.info(f"pulling changes from {self.repo.remotes.origin.url} -> Branch {self.repo.active_branch}")
return self.repo.remotes.origin.pull(refspec=self.repo.active_branch)
def add(self):
"""
        Adds modified files that will be committed
        :return: True or False, depending on whether there are changes
"""
self.repo.git.add([CHANGELOG_JSON_PATH, CHANGELOG_MD_PATH, CHANGELOG_REPO_PATH], update=True)
return self.check_for_changes()
def check_for_changes(self):
"""
Checkes if there are any important changes regarding infra-tracking
:return: True or False, depending if there are changes
"""
if not self.repo.index.diff("HEAD"):
self.LOGGER.info("Nothing staged for commit. has the data or files changed?")
return False
return True
def commit(self):
"""
Commits the added changes
"""
self.LOGGER.info(f"Committing changes with message: Changelog: {return_time()}")
return self.repo.index.commit("Changelog: " + return_time(output_time_format="%Y-%m-%dT%H:%M:%S"))
def push_to_git(self):
"""
        Pushes the committed changes to GitHub
"""
self.LOGGER.info(f"Summary of pull: {FICGithub.pull(self)[0]}")
if FICGithub.add(self):
self.LOGGER.info(f"Summary of commit {FICGithub.commit(self)}")
self.LOGGER.info(f"pushing changes to {self.repo.remotes.origin.url} on branch {self.repo.active_branch}")
self.LOGGER.info(f"Summary of push: {self.repo.remotes.origin.push(refspec=self.repo.active_branch)[0].summary}")
def revert_modified_files(self):
"""
Undo any changes to files in case the script fails to run successfully
"""
return self.repo.git.checkout([CHANGELOG_JSON_PATH, CHANGELOG_MD_PATH, CHANGELOG_REPO_PATH])
def get_repo_url(self):
"""
It gets the repository URL.
:return: the URL of the repository
"""
return self.repo_data.svn_url
def _compare_versions(self):
"""
        Checks the latest version of a repo locally, on GitHub and in build-puppet if defined
        :return: True if the checked places return different versions, otherwise False
"""
if self.build_puppet_version == self.release_version and self.release_version != self.local_version:
return True
else:
return False
def _last_checked(self):
"""
:return: the last checked value in the json files. If there isn't one use DEFAULT_DAYS
"""
if json.load(self.load(CHANGELOG_REPO_PATH, self.repo_name.lower() + ".json")).get("0"):
self.last_check = json.load(self.load(CHANGELOG_REPO_PATH, self.repo_name.lower() + ".json")).get("0").get("last_checked")
else:
self.last_check = return_time(output_time_format="%Y-%m-%dT%H:%M:%S.%f", operation="sub", operation_days=DEFAULT_DAYS)
def _commit_iterator(self):
"""
        Iterates through the commits of a repo and saves them if they contain infra changes
"""
for current_commit in self.repo_data.commits(since=self.last_check):
if self.limit_checker():
self._get_message(current_commit)
self._get_sha(current_commit)
self._get_files()
if self._commit_filter():
self.commit_number += 1
self._store_data(current_commit)
self._construct_commit()
self.keyword = None
def _store_data(self, current_commit):
"""
extracts the data we care about from a commit object
"""
self._get_date(current_commit)
self._get_author(current_commit)
self._get_author_email(current_commit)
self._get_url(current_commit)
def _compare_files(self):
"""
Checks to see if any of the files we track in a repo were changed
:return: True if any of the files we care about received changes
"""
for folder_to_check in range(len(self.folders_to_check)):
for changed_folder in range(len(self.commit_files_changed)):
if str(self.folders_to_check[folder_to_check]) in str(self.commit_files_changed[changed_folder]):
return True
def _construct_commit(self):
"""
Constructs the object that stores all the data we care about for a single commit
"""
self.list_of_commits.update({self.commit_number: {'sha': self.commit_sha,
'url': self.commit_url,
'author': self.commit_author,
'author_email': self.commit_author_email,
'message': self.commit_message,
'date': self.commit_date,
'files': f"This commit contains {len(self.commit_files_changed)} files changed."
}
})
def _commit_filter(self):
"""
Determines if we care about saving a commit
:return: True if the repo type and its conditions are met
"""
if self.repo_type == "commit-keyword":
if self.keyword in self.commit_message:
return True
elif self.repo_type == "tag":
if self.repo_name == "build-puppet":
return True
elif self.release_version in self.commit_message:
return True
elif len(self.folders_to_check) > 0:
if self._compare_files():
return True
else:
return True
def _repo_type_checker(self):
"""
Checks the type of a repo, as defined in repositories.json, and runs the appropriate methods
"""
if self.repo_type == "no-tag":
self._not_tag()
elif self.repo_type == "tag":
if self.repo_name == "build-puppet":
self._build_puppet()
else:
self._tag()
elif self.repo_type == "commit-keyword":
self._commit_keyword()
else:
self.LOGGER.critical(f"Repo type not defined for {self.repo_name}")
def _generate_first_element(self):
"""
Creates the first element which will be at the top of the json file
:return:
"""
if self.repo_type == "tag" and self.repo_name != "build-puppet":
return {"0": {"last_checked": return_time(output_time_format="%Y-%m-%dT%H:%M:%S.%f"), "version": self._get_release(1)}}
else:
return {"0": {"last_checked": return_time(output_time_format="%Y-%m-%dT%H:%M:%S.%f")}}
def start_git(self, repo_name=None):
"""
        The entry point for git. Runs the entire logic for one git repo at a time
:param repo_name: name of the git repo to be worked on
"""
self.list_of_commits = {}
self.repo_name = repo_name
self._extract_repo_type()
self._repo_team()
self.read_repo()
self._repo_files()
self.commit_number = 0
self.list_of_commits.update(self._generate_first_element())
self._repo_type_checker()
self._write_git_json()
def _check_commit_age(self, filter_list=None):
"""
        Removes data that is older than a set amount of days. Default = 30
:param filter_list: awaits a list of commit data
"""
for commit in range(1, len(filter_list)):
if return_time(input_time=filter_list[str(commit)]["date"], input_time_format="%Y-%m-%dT%H:%M:%SZ") > \
return_time(operation="sub", operation_days=INDIVIDUAL_REPO_DAYS):
self.commit_number += 1
self.list_of_commits.update({str(self.commit_number): filter_list[str(commit)]})
def _write_git_json(self):
"""
Writes the final, filtered, extracted data to json files
"""
local_json_data = json.load(self.load(CHANGELOG_REPO_PATH, self.repo_name.lower() + ".json"))
# In case we have no new commits to save
if len(self.list_of_commits) == 1:
local_json_data.update(self._generate_first_element())
self._check_commit_age(local_json_data)
self.save(CHANGELOG_REPO_PATH, self.repo_name + ".json", self.list_of_commits)
# In case we have new commits + local data
elif len(local_json_data) >= 1:
local_json_data.pop("0")
self._check_commit_age(local_json_data)
self.save(CHANGELOG_REPO_PATH, self.repo_name + ".json", self.list_of_commits)
# In case we have new commits and NO local data
else:
self.save(CHANGELOG_REPO_PATH, self.repo_name + ".json", self.list_of_commits)
| 40.301471
| 156
| 0.614669
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import github3
from modules.FIC_FileHandler import FICFileHandler
from modules.FIC_DataVault import FICDataVault
from modules.config import *
from modules.FIC_Utilities import return_time
from modules.config import CHANGELOG_JSON_PATH, CHANGELOG_MD_PATH, CHANGELOG_REPO_PATH, INDIVIDUAL_REPO_DAYS
from git import Repo
import os
import json
import requests
import re
class FICGithub(FICFileHandler, FICDataVault):
"""
    This class handles extracting, filtering and writing data into JSON files for git repos
    that affect Firefox infra
"""
def __init__(self):
FICFileHandler.__init__(self)
FICDataVault.__init__(self)
self.token_counter = 0
self._get_os_var()
self._token = os.environ.get(GIT_TOKEN[self.token_counter])
self._gh = self._auth()
self.repo_data = None
self.repo = Repo(self.construct_path(None, None))
def _auth(self):
return github3.login(token=self._token)
def read_repo(self):
"""
        It calls the method that gets all the repository data, using its name, team and the authentication credential
as parameters.
"""
return self._init_github(self._gh, self.team_name, self.repo_name)
def _init_github(self, *args):
self.repo_data = github3.GitHub.repository(args[0], args[1], args[2])
return self.repo_data
def _get_os_var(self):
for var in os.environ:
# append the OS.VAR to the list in case of no duplicate
if "GIT_TOKEN" in var and var not in GIT_TOKEN:
GIT_TOKEN.append(var)
self.LOGGER.info(f"The list of available tokens: {GIT_TOKEN}")
def _get_reset_time(self):
reset_time = return_time(input_time=self._gh.rate_limit()['rate']['reset'], input_time_unix=True)
self.LOGGER.info(f"Rate limit reset in: {reset_time - return_time()}")
return reset_time
def limit_checker(self):
"""Checks for the amount of limit requests remaining on a specific token.
Switches to a new token from a group of defined tokens upon reaching 5 requests remaining
"""
limit_requests = self._gh.ratelimit_remaining
self.LOGGER.info(f"The number of limit requests is: {limit_requests}")
if limit_requests < 5 and len(GIT_TOKEN) > 1:
# switch token
if self._switch_token():
return True
else:
# check if the rate limit was reset for the second use of a token
if limit_requests < 5:
self._get_reset_time()
return False
else:
return True
# check the reset time in case of a single token
elif limit_requests < 5:
self._get_reset_time()
return False
# return True in case of limit request not reached
else:
return True
def _switch_token(self):
"""
Method that switches git tokens
"""
# get next token
switch = self._get_token()
# re-logging with the new token
self._token = os.environ.get(GIT_TOKEN[self.token_counter])
self._gh = self._auth()
self.LOGGER.info("The token was changed.")
return switch
def _get_token(self):
"""
Returns the next token from the list of defined tokens
"""
# in case of the next token but not the last
if self.token_counter < len(GIT_TOKEN) - 1:
self.token_counter += 1
self.LOGGER.info(f"Changing token with: {GIT_TOKEN[self.token_counter]}")
return True
# in case of the last token
elif self.token_counter == len(GIT_TOKEN) - 1:
self.token_counter = 0
self.LOGGER.info(f"Changing token with: {GIT_TOKEN[self.token_counter]}")
return False
def pull(self):
"""
Pulls changes for a git repo
:return: the pulled changes
"""
self.LOGGER.info(f"pulling changes from {self.repo.remotes.origin.url} -> Branch {self.repo.active_branch}")
return self.repo.remotes.origin.pull(refspec=self.repo.active_branch)
def add(self):
"""
        Adds modified files that will be committed
        :return: True or False, depending on whether there are changes
"""
self.repo.git.add([CHANGELOG_JSON_PATH, CHANGELOG_MD_PATH, CHANGELOG_REPO_PATH], update=True)
return self.check_for_changes()
def check_for_changes(self):
"""
Checkes if there are any important changes regarding infra-tracking
:return: True or False, depending if there are changes
"""
if not self.repo.index.diff("HEAD"):
self.LOGGER.info("Nothing staged for commit. has the data or files changed?")
return False
return True
def commit(self):
"""
Commits the added changes
"""
self.LOGGER.info(f"Committing changes with message: Changelog: {return_time()}")
return self.repo.index.commit("Changelog: " + return_time(output_time_format="%Y-%m-%dT%H:%M:%S"))
def push_to_git(self):
"""
        Pushes the committed changes to GitHub
"""
self.LOGGER.info(f"Summary of pull: {FICGithub.pull(self)[0]}")
if FICGithub.add(self):
self.LOGGER.info(f"Summary of commit {FICGithub.commit(self)}")
self.LOGGER.info(f"pushing changes to {self.repo.remotes.origin.url} on branch {self.repo.active_branch}")
self.LOGGER.info(f"Summary of push: {self.repo.remotes.origin.push(refspec=self.repo.active_branch)[0].summary}")
def revert_modified_files(self):
"""
Undo any changes to files in case the script fails to run successfully
"""
return self.repo.git.checkout([CHANGELOG_JSON_PATH, CHANGELOG_MD_PATH, CHANGELOG_REPO_PATH])
def get_repo_url(self):
"""
It gets the repository URL.
:return: the URL of the repository
"""
return self.repo_data.svn_url
def _repo_team(self):
self.team_name = json.load(self.load(None, REPOSITORIES_FILE)).get("Github").get(self.repo_name).get("team")
def _repo_files(self):
self.folders_to_check = json.load(self.load(None, REPOSITORIES_FILE)).get("Github").get(self.repo_name).get("configuration").get("folders-to-check")
def _extract_repo_type(self):
self.repo_type = json.load(self.load(None, REPOSITORIES_FILE)).get("Github").get(self.repo_name).get("configuration").get("type")
def _local_version(self):
if json.load(self.load(CHANGELOG_REPO_PATH, self.repo_name.lower() + ".json")).get("0"):
self.local_version = json.load(self.load(CHANGELOG_REPO_PATH, self.repo_name.lower() + ".json")).get("0").get("version")
def _get_release(self, release_number):
return [tag for tag in self.repo_data.tags(number=release_number)][release_number - 1].name
def _get_version_path(self):
self.version_path = json.load(self.load(None, REPOSITORIES_FILE)).get("Github").get(self.repo_name).get("configuration").get("version-path")
def _build_puppet_version(self):
self._get_version_path()
for requirements in requests.get(self.version_path).text.split():
if self.repo_name in requirements:
self.build_puppet_version = re.split("\\b==\\b", requirements)[-1]
return True
def _compare_versions(self):
"""
        Checks the latest version of a repo locally, on GitHub and in build-puppet if defined
        :return: True if the checked places return different versions, otherwise False
"""
if self.build_puppet_version == self.release_version and self.release_version != self.local_version:
return True
else:
return False
def _last_checked(self):
"""
:return: the last checked value in the json files. If there isn't one use DEFAULT_DAYS
"""
if json.load(self.load(CHANGELOG_REPO_PATH, self.repo_name.lower() + ".json")).get("0"):
self.last_check = json.load(self.load(CHANGELOG_REPO_PATH, self.repo_name.lower() + ".json")).get("0").get("last_checked")
else:
self.last_check = return_time(output_time_format="%Y-%m-%dT%H:%M:%S.%f", operation="sub", operation_days=DEFAULT_DAYS)
def _commit_iterator(self):
"""
        Iterates through the commits of a repo and saves them if they contain infra changes
"""
for current_commit in self.repo_data.commits(since=self.last_check):
if self.limit_checker():
self._get_message(current_commit)
self._get_sha(current_commit)
self._get_files()
if self._commit_filter():
self.commit_number += 1
self._store_data(current_commit)
self._construct_commit()
self.keyword = None
def _store_data(self, current_commit):
"""
extracts the data we care about from a commit object
"""
self._get_date(current_commit)
self._get_author(current_commit)
self._get_author_email(current_commit)
self._get_url(current_commit)
def _get_sha(self, commit):
self.commit_sha = commit.sha
return self.commit_sha
def _get_message(self, commit):
self.commit_message = commit.message
return self.commit_message
def _get_date(self, commit):
self.commit_date = commit.commit.author.get("date")
return self.commit_date
def _get_author(self, commit):
self.commit_author = commit.commit.author.get("name")
return self.commit_author
def _get_author_email(self, commit):
self.commit_author_email = commit.commit.author.get("email")
return self.commit_author_email
def _get_url(self, commit):
self.commit_url = commit.url
return self.commit_url
def _get_files(self):
self.commit_files_changed = []
for item in (range(len(self.repo_data.commit(sha=self.commit_sha).files))):
self.commit_files_changed.append(self.repo_data.commit(sha=self.commit_sha).files[item].get('filename'))
def _compare_files(self):
"""
Checks to see if any of the files we track in a repo were changed
:return: True if any of the files we care about received changes
"""
for folder_to_check in range(len(self.folders_to_check)):
for changed_folder in range(len(self.commit_files_changed)):
if str(self.folders_to_check[folder_to_check]) in str(self.commit_files_changed[changed_folder]):
return True
def _construct_commit(self):
"""
Constructs the object that stores all the data we care about for a single commit
"""
self.list_of_commits.update({self.commit_number: {'sha': self.commit_sha,
'url': self.commit_url,
'author': self.commit_author,
'author_email': self.commit_author_email,
'message': self.commit_message,
'date': self.commit_date,
'files': f"This commit contains {len(self.commit_files_changed)} files changed."
}
})
def _commit_filter(self):
"""
Determines if we care about saving a commit
:return: True if the repo type and its conditions are met
"""
if self.repo_type == "commit-keyword":
if self.keyword in self.commit_message:
return True
elif self.repo_type == "tag":
if self.repo_name == "build-puppet":
return True
elif self.release_version in self.commit_message:
return True
elif len(self.folders_to_check) > 0:
if self._compare_files():
return True
else:
return True
def _not_tag(self):
self._last_checked()
self._commit_iterator()
def _build_puppet(self):
self._last_checked()
self._commit_iterator()
def _tag(self):
self._last_checked()
self.release_version = self._get_release(1)
self._local_version()
if self.repo_name == "mozapkpublisher" and self.release_version != self.local_version:
self._commit_iterator()
else:
self._build_puppet_version()
if self._compare_versions():
self._commit_iterator()
def _commit_keyword(self):
self._last_checked()
self.keyword = 'deploy'
self._commit_iterator()
def _repo_type_checker(self):
"""
Checks the type of a repo, as defined in repositories.json, and runs the appropriate methods
"""
if self.repo_type == "no-tag":
self._not_tag()
elif self.repo_type == "tag":
if self.repo_name == "build-puppet":
self._build_puppet()
else:
self._tag()
elif self.repo_type == "commit-keyword":
self._commit_keyword()
else:
self.LOGGER.critical(f"Repo type not defined for {self.repo_name}")
def _generate_first_element(self):
"""
Creates the first element which will be at the top of the json file
:return:
"""
if self.repo_type == "tag" and self.repo_name != "build-puppet":
return {"0": {"last_checked": return_time(output_time_format="%Y-%m-%dT%H:%M:%S.%f"), "version": self._get_release(1)}}
else:
return {"0": {"last_checked": return_time(output_time_format="%Y-%m-%dT%H:%M:%S.%f")}}
def start_git(self, repo_name=None):
"""
        The entry point for git. Runs the entire logic for one git repo at a time
:param repo_name: name of the git repo to be worked on
"""
self.list_of_commits = {}
self.repo_name = repo_name
self._extract_repo_type()
self._repo_team()
self.read_repo()
self._repo_files()
self.commit_number = 0
self.list_of_commits.update(self._generate_first_element())
self._repo_type_checker()
self._write_git_json()
def _check_commit_age(self, filter_list=None):
"""
        Removes data that is older than a set amount of days. Default = 30
:param filter_list: awaits a list of commit data
"""
for commit in range(1, len(filter_list)):
if return_time(input_time=filter_list[str(commit)]["date"], input_time_format="%Y-%m-%dT%H:%M:%SZ") > \
return_time(operation="sub", operation_days=INDIVIDUAL_REPO_DAYS):
self.commit_number += 1
self.list_of_commits.update({str(self.commit_number): filter_list[str(commit)]})
def _write_git_json(self):
"""
Writes the final, filtered, extracted data to json files
"""
local_json_data = json.load(self.load(CHANGELOG_REPO_PATH, self.repo_name.lower() + ".json"))
# In case we have no new commits to save
if len(self.list_of_commits) == 1:
local_json_data.update(self._generate_first_element())
self._check_commit_age(local_json_data)
self.save(CHANGELOG_REPO_PATH, self.repo_name + ".json", self.list_of_commits)
# In case we have new commits + local data
elif len(local_json_data) >= 1:
local_json_data.pop("0")
self._check_commit_age(local_json_data)
self.save(CHANGELOG_REPO_PATH, self.repo_name + ".json", self.list_of_commits)
# In case we have new commits and NO local data
else:
self.save(CHANGELOG_REPO_PATH, self.repo_name + ".json", self.list_of_commits)
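# --- Standalone sketch (an assumption, not the project's code) ---
# `_check_commit_age` above keeps only commits newer than INDIVIDUAL_REPO_DAYS.
# The helper below illustrates the same idea with the standard library only;
# `return_time` and the module's config are not reproduced here.
def _keep_recent_commits_sketch(commits, max_age_days=30):
    """Return the commit dicts whose 'date' lies within max_age_days of now (UTC)."""
    from datetime import datetime, timedelta
    cutoff = datetime.utcnow() - timedelta(days=max_age_days)
    recent = {}
    for commit in commits.values():
        if datetime.strptime(commit["date"], "%Y-%m-%dT%H:%M:%SZ") > cutoff:
            recent[str(len(recent) + 1)] = commit
    return recent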
| 3,567
| 0
| 620
|
e33d55bf18910e145547acf32449abfce2faec4d
| 143
|
py
|
Python
|
tests/.ipynb_checkpoints/test_clocklike_reconstruction-checkpoint.py
|
mroctavious/Phylogeny
|
014461b5ec6323f42cf1c913f6a9bdba42e3bc05
|
[
"MIT"
] | 2
|
2020-01-17T17:19:15.000Z
|
2021-04-18T22:27:59.000Z
|
tests/.ipynb_checkpoints/test_clocklike_reconstruction-checkpoint.py
|
mroctavious/Phylogeny
|
014461b5ec6323f42cf1c913f6a9bdba42e3bc05
|
[
"MIT"
] | null | null | null |
tests/.ipynb_checkpoints/test_clocklike_reconstruction-checkpoint.py
|
mroctavious/Phylogeny
|
014461b5ec6323f42cf1c913f6a9bdba42e3bc05
|
[
"MIT"
] | 2
|
2018-08-30T20:57:37.000Z
|
2020-09-09T06:29:02.000Z
|
from phylogeny.clocklike_reconstruction import simpledistance
| 35.75
| 61
| 0.748252
|
from phylogeny.clocklike_reconstruction import simpledistance
def test_simpledistance():
assert simpledistance([0]*10, [1]*4 + [0]*6) == 4
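# --- Illustration only (an assumption, not the library's implementation) ---
# The assertion above is consistent with `simpledistance` counting the positions
# where two equal-length sequences differ (a Hamming distance). A minimal sketch:
def _simpledistance_sketch(a, b):
    assert len(a) == len(b)
    return sum(1 for x, y in zip(a, b) if x != y)

def test_simpledistance_sketch():
    # [0]*10 and [1]*4 + [0]*6 differ in exactly the first four positions.
    assert _simpledistance_sketch([0] * 10, [1] * 4 + [0] * 6) == 4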
| 59
| 0
| 23
|
b79293430cd9bd198caf3f11984b1aef0fd80517
| 235
|
py
|
Python
|
crawler/yhs_crawler/yhs_data_analyze.py
|
ArseneLupinhb/py_al
|
e2e4d25a00cb13d68da26c17f86f9cf1e47a79e1
|
[
"MIT"
] | 1
|
2020-04-14T03:32:56.000Z
|
2020-04-14T03:32:56.000Z
|
crawler/yhs_crawler/yhs_data_analyze.py
|
ArseneLupinhb/py_al
|
e2e4d25a00cb13d68da26c17f86f9cf1e47a79e1
|
[
"MIT"
] | null | null | null |
crawler/yhs_crawler/yhs_data_analyze.py
|
ArseneLupinhb/py_al
|
e2e4d25a00cb13d68da26c17f86f9cf1e47a79e1
|
[
"MIT"
] | null | null | null |
# Import libraries
import os
import pandas as pd
os.getcwd()
source_path = os.getcwd() + r'/crawler/yhs_crawler/data/'
# Read the data
df_1 = pd.read_csv(source_path + 'yhs弹幕.csv', encoding='utf-8', engine='python')
df_1.head(20)
df_1.info()
df_1.shape
| 15.666667
| 80
| 0.702128
|
# Import libraries
import os
import pandas as pd
os.getcwd()
source_path = os.getcwd() + r'/crawler/yhs_crawler/data/'
# Read the data
df_1 = pd.read_csv(source_path + 'yhs弹幕.csv', encoding='utf-8', engine='python')
df_1.head(20)
df_1.info()
df_1.shape
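# Illustrative follow-up checks (not part of the original script); they make
# no assumptions about the column names in the danmaku CSV.
print(df_1.describe(include="all"))   # per-column summary statistics
print(df_1.isnull().sum())            # missing values per column
print(df_1.duplicated().sum())        # count of fully duplicated rows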
| 0
| 0
| 0
|
66e7059707f09e1697e34f01e13af035f5eb1282
| 161
|
py
|
Python
|
PyRssReader/converter/convertertype.py
|
mohi/PyRssReader
|
9c1b07fa7574af9acd827946702e69e0ab2c5fab
|
[
"MIT"
] | 4
|
2021-05-24T12:10:12.000Z
|
2021-08-04T14:29:47.000Z
|
PyRssReader/converter/convertertype.py
|
mohi/PyRssReader
|
9c1b07fa7574af9acd827946702e69e0ab2c5fab
|
[
"MIT"
] | null | null | null |
PyRssReader/converter/convertertype.py
|
mohi/PyRssReader
|
9c1b07fa7574af9acd827946702e69e0ab2c5fab
|
[
"MIT"
] | null | null | null |
from enum import Enum
class ConverterType(Enum):
"""
    Implements naming for the type of convert operations
"""
NONE = 0
CUT = 1
REPLACE = 2
| 13.416667
| 51
| 0.608696
|
from enum import Enum
class ConverterType(Enum):
"""
    Implements naming for the type of convert operations
"""
NONE = 0
CUT = 1
REPLACE = 2
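# --- Usage sketch (illustration only, not part of the original module) ---
# Standard Enum semantics apply: members compare by identity and expose
# `.name` / `.value`.
if __name__ == '__main__':
    op = ConverterType.CUT
    if op is ConverterType.CUT:
        print("cut operation selected")
    print(op.name, op.value)   # -> CUT 1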
| 0
| 0
| 0
|
073344b2c4b77653eeb9ec24d9ddb7b328ac702a
| 461
|
py
|
Python
|
pyfinder/theories/poset.py
|
punkdit/pyfinder
|
b59e5ecc4e8b10b3e4365750834090473b873ff6
|
[
"BSD-2-Clause"
] | null | null | null |
pyfinder/theories/poset.py
|
punkdit/pyfinder
|
b59e5ecc4e8b10b3e4365750834090473b873ff6
|
[
"BSD-2-Clause"
] | null | null | null |
pyfinder/theories/poset.py
|
punkdit/pyfinder
|
b59e5ecc4e8b10b3e4365750834090473b873ff6
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
from pyfinder.expr import *
#______________________________________________________________________________
# Poset theory.
element = Sort('element')
le = Function('le', [element, element], BOOL)
a = Variable('a', element)
b = Variable('b', element)
c = Variable('c', element)
theory = Theory([
le(a, a),
(le(a, b) & le(b, c)).implies(le(a, c)),
(le(a, b) & le(b, a)).implies(a==b),
])
sorts = [element]
funcs = [le]
| 17.074074
| 79
| 0.642082
|
#!/usr/bin/env python
from pyfinder.expr import *
#______________________________________________________________________________
# Poset theory.
element = Sort('element')
le = Function('le', [element, element], BOOL)
a = Variable('a', element)
b = Variable('b', element)
c = Variable('c', element)
theory = Theory([
le(a, a),
(le(a, b) & le(b, c)).implies(le(a, c)),
(le(a, b) & le(b, a)).implies(a==b),
])
sorts = [element]
funcs = [le]
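# --- Illustration only (plain Python, not pyfinder's model finder) ---
# The three formulas above state reflexivity, transitivity and antisymmetry.
# A brute-force check that the usual order on {0, 1, 2} satisfies them:
if __name__ == '__main__':
    elems = [0, 1, 2]
    leq = lambda x, y: x <= y
    reflexive = all(leq(x, x) for x in elems)
    transitive = all(leq(x, z) for x in elems for y in elems for z in elems
                     if leq(x, y) and leq(y, z))
    antisymmetric = all(x == y for x in elems for y in elems
                        if leq(x, y) and leq(y, x))
    print(reflexive, transitive, antisymmetric)   # True True True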
| 0
| 0
| 0
|
92e5cf0d3f91ede077fb779a1a97cbf351754b1f
| 11,755
|
py
|
Python
|
tests/select_test.py
|
BryanCutler/graph_def_editor
|
e8c44fe003df4f0e2c9791507abae556ee70bd71
|
[
"Apache-2.0"
] | 13
|
2020-06-20T15:27:45.000Z
|
2021-08-30T21:12:52.000Z
|
tests/select_test.py
|
BryanCutler/graph_def_editor
|
e8c44fe003df4f0e2c9791507abae556ee70bd71
|
[
"Apache-2.0"
] | null | null | null |
tests/select_test.py
|
BryanCutler/graph_def_editor
|
e8c44fe003df4f0e2c9791507abae556ee70bd71
|
[
"Apache-2.0"
] | 4
|
2020-11-03T08:20:22.000Z
|
2020-12-27T17:45:41.000Z
|
# Copyright 2018 IBM. All Rights Reserved.
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.graph_editor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
import unittest
import graph_def_editor as gde
if __name__ == "__main__":
unittest.main()
| 36.058282
| 80
| 0.642365
|
# Copyright 2018 IBM. All Rights Reserved.
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.graph_editor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
import unittest
import graph_def_editor as gde
class SelectTest(unittest.TestCase):
# TODO(frreiss): Merge duplicate setup code across test cases
def setUp(self):
tf_graph = tf.Graph()
with tf_graph.as_default():
a = tf.constant([1., 1.], shape=[2], name="a")
with tf.name_scope("foo"):
b = tf.constant([2., 2.], shape=[2], name="b")
c = tf.add(a, b, name="c")
d = tf.constant([3., 3.], shape=[2], name="d")
with tf.name_scope("bar"):
e = tf.add(c, d, name="e")
f = tf.add(c, d, name="f")
g = tf.add(c, a, name="g")
with tf.control_dependencies([c.op]):
h = tf.add(f, g, name="h")
self.graph = gde.Graph(tf_graph)
self.a = self.graph.get_tensor_by_name(a.name)
self.b = self.graph.get_tensor_by_name(b.name)
self.c = self.graph.get_tensor_by_name(c.name)
self.d = self.graph.get_tensor_by_name(d.name)
self.e = self.graph.get_tensor_by_name(e.name)
self.f = self.graph.get_tensor_by_name(f.name)
self.g = self.graph.get_tensor_by_name(g.name)
self.h = self.graph.get_tensor_by_name(h.name)
def test_regex(self):
"""Test for ge.can_be_regex and ge.make_regex."""
self.assertTrue(gde.can_be_regex("foo"))
self.assertTrue(gde.can_be_regex(re.compile("foo")))
regex = re.compile("foo")
self.assertIs(gde.make_regex(regex), regex)
def test_get_input_output_ts(self):
"""Test for ge._get_input_ts abd ge._get_output_ts."""
self.assertEqual(len(gde.select._get_input_ts(self.graph)), 6)
self.assertEqual(len(gde.select._get_output_ts(self.graph)), 8)
def test_get_filter(self):
"""Test for various filtering operations on ts ops."""
# TODO(fkp): parameterize
self.assertEqual(len(gde.filter_ops(self.graph, True)), 8)
self.assertEqual(
len(gde.filter_ops(self.graph,
lambda op: op.op_type == "Const")), 3)
self.assertEqual(
len(gde.filter_ops(self.graph, lambda op: op.op_type == "Add")), 5)
self.assertEqual(
len(gde.filter_ops_from_regex(self.graph, r"^.*\b[abc]$")), 3)
self.assertEqual(len(gde.filter_ts(self.graph, True)), 8)
self.assertEqual(
len(gde.filter_ts_from_regex(self.graph, r"^.*/[fgh]:\d$")), 3)
self.assertEqual(len(gde.get_name_scope_ops(self.graph, "foo/")), 7)
self.assertEqual(len(gde.get_name_scope_ops(self.graph, "foo/bar")), 4)
def test_get_ops_ios(self):
"""Test for ge.get_ops_ios."""
control_outputs = gde.util.ControlOutputs(self.graph)
self.assertEqual(
len(gde.get_ops_ios(self.h.op, control_ios=control_outputs)), 3)
self.assertEqual(len(gde.get_ops_ios(self.h.op)), 2)
self.assertEqual(
len(gde.get_ops_ios(self.c.op, control_ios=control_outputs)), 6)
self.assertEqual(len(gde.get_ops_ios(self.c.op)), 5)
def test_compute_boundary_ts_0(self):
"""Test for ge.compute_boundary_ts."""
input_ts, output_ts, inside_ts = gde.compute_boundary_ts(self.g.op)
self.assertEqual(list(input_ts), [self.c, self.a])
self.assertEqual(list(output_ts), [self.g])
self.assertEqual(list(inside_ts), [])
def test_compute_boundary_ts_1(self):
"""Test for ge.compute_boundary_ts."""
input_ts, output_ts, inside_ts = gde.compute_boundary_ts(
[self.g.op, self.h.op])
self.assertEqual(list(input_ts), [self.c, self.a, self.f])
self.assertEqual(list(output_ts), [self.h])
self.assertEqual(list(inside_ts), [self.g])
def test_compute_boundary_ts_2(self):
"""Test for ge.compute_boundary_ts."""
tf_graph = tf.Graph()
with tf_graph.as_default():
a_tensor = tf.constant(1, name="a")
b_tensor = tf.constant(1, name="b")
c_tensor = tf.add(a_tensor, b_tensor, name="c")
_ = a_tensor + c_tensor
g = gde.Graph(tf_graph)
input_ts, output_ts, inside_ts = gde.compute_boundary_ts([g["a"], g["c"]])
self.assertEqual(list(input_ts), [g["b"].output(0)])
self.assertEqual(list(output_ts), [g["a"].output(0), g["c"].output(0)])
self.assertEqual(list(inside_ts), [g["a"].output(0)])
def test_get_within_boundary_ops_0(self):
"""Test for test_get_within_boundary_ops."""
control_outputs = gde.util.ControlOutputs(self.graph)
ops = gde.get_within_boundary_ops(
ops=self.graph,
seed_ops=self.f.op,
boundary_ops=[self.c.op, self.h.op],
inclusive=False,
control_ios=control_outputs)
self.assertEqual(len(ops), 3)
def test_get_within_boundary_ops_1(self):
"""Test for ge.test_get_within_boundary_ops."""
ops = gde.get_within_boundary_ops(
ops=self.graph, seed_ops=self.h.op, boundary_ops=[self.f.op, self.g.op])
self.assertEqual(len(ops), 3)
def test_get_walks_intersection(self):
"""Test for ge.get_walks_intersection_ops."""
ops = gde.get_walks_intersection_ops([self.c.op], [self.g.op])
self.assertEqual(len(ops), 2)
ops = gde.get_walks_intersection_ops([self.a.op], [self.f.op])
self.assertEqual(len(ops), 3)
self.assertTrue(self.a.op in ops)
self.assertTrue(self.c.op in ops)
self.assertTrue(self.f.op in ops)
within_ops = [self.a.op, self.f.op]
ops = gde.get_walks_intersection_ops(
[self.a.op], [self.f.op], within_ops=within_ops)
self.assertEqual(len(ops), 0)
def within_ops_fn(op):
return op in [self.a.op, self.f.op]
ops = gde.get_walks_intersection_ops(
[self.a.op], [self.f.op], within_ops_fn=within_ops_fn)
self.assertEqual(len(ops), 0)
def test_get_walks_union(self):
"""Test for ge.get_walks_union_ops."""
ops = gde.get_walks_union_ops([self.f.op], [self.g.op])
self.assertEqual(len(ops), 6)
ops = gde.get_walks_union_ops([self.a.op], [self.f.op])
self.assertEqual(len(ops), 8)
within_ops = [self.a.op, self.c.op, self.d.op, self.f.op]
ops = gde.get_walks_union_ops([self.a.op], [self.f.op],
within_ops=within_ops)
self.assertEqual(len(ops), 4)
self.assertTrue(self.b.op not in ops)
def within_ops_fn(op):
return op in [self.a.op, self.c.op, self.f.op]
ops = gde.get_walks_union_ops([self.a.op], [self.f.op],
within_ops_fn=within_ops_fn)
self.assertEqual(len(ops), 3)
self.assertTrue(self.b.op not in ops)
self.assertTrue(self.d.op not in ops)
def test_select_ops(self):
parameters = (
(("^foo/",), 7),
(("^foo/bar/",), 4),
(("^foo/bar/", "a"), 5),
)
for param, length in parameters:
ops = gde.select_ops(*param, graph=self.graph)
self.assertEqual(len(ops), length)
def test_select_ts(self):
parameters = (
(".*:0", 8),
(r".*/bar/\w+:0", 4),
)
for regex, length in parameters:
ts = gde.select_ts(regex, graph=self.graph)
self.assertEqual(len(ts), length)
def test_select_ops_and_ts(self):
parameters = (
(("^foo/.*",), 7, 0),
(("^foo/.*", "(?#ts)^foo/bar/.*"), 7, 4),
)
for param, l0, l1 in parameters:
ops, ts = gde.select_ops_and_ts(*param, graph=self.graph)
self.assertEqual(len(ops), l0)
self.assertEqual(len(ts), l1)
def test_forward_walk_ops(self):
seed_ops = [self.a.op, self.d.op]
# Include all ops except for self.g.op
within_ops = [
x.op for x in [self.a, self.b, self.c, self.d, self.e, self.f, self.h]
]
# For the fn, exclude self.e.op.
def within_ops_fn(op):
return op not in (self.e.op,)
stop_at_ts = (self.f,)
# No b.op since it's an independent source node.
# No g.op from within_ops.
# No e.op from within_ops_fn.
# No h.op from stop_at_ts and within_ops.
ops = gde.select.get_forward_walk_ops(
seed_ops,
inclusive=True,
within_ops=within_ops,
within_ops_fn=within_ops_fn,
stop_at_ts=stop_at_ts)
self.assertEqual(
set(ops), {self.a.op, self.c.op, self.d.op, self.f.op })
# Also no a.op and d.op when inclusive=False
ops = gde.select.get_forward_walk_ops(
seed_ops,
inclusive=False,
within_ops=within_ops,
within_ops_fn=within_ops_fn,
stop_at_ts=stop_at_ts)
self.assertEqual(set(ops), {self.c.op, self.f.op})
# Not using within_ops_fn adds e.op.
ops = gde.select.get_forward_walk_ops(
seed_ops,
inclusive=False,
within_ops=within_ops,
stop_at_ts=stop_at_ts)
self.assertEqual(set(ops), {self.c.op, self.e.op, self.f.op})
# Not using stop_at_ts adds back h.op.
ops = gde.select.get_forward_walk_ops(
seed_ops, inclusive=False, within_ops=within_ops)
self.assertEqual(
set(ops), {self.c.op, self.e.op, self.f.op, self.h.op})
    # Starting just from a (the tensor, not op) omits a, b, d.
ops = gde.select.get_forward_walk_ops([self.a], inclusive=True)
self.assertEqual(
set(ops), {self.c.op, self.e.op, self.f.op, self.g.op, self.h.op})
def test_backward_walk_ops(self):
seed_ops = [self.h.op]
# Include all ops except for self.g.op
within_ops = [
x.op for x in [self.a, self.b, self.c, self.d, self.e, self.f, self.h]
]
# For the fn, exclude self.c.op.
def within_ops_fn(op):
return op not in (self.c.op,)
stop_at_ts = (self.f,)
# Backward walk only includes h since we stop at f and g is not within.
ops = gde.select.get_backward_walk_ops(
seed_ops,
inclusive=True,
within_ops=within_ops,
within_ops_fn=within_ops_fn,
stop_at_ts=stop_at_ts)
self.assertEqual(set(ops), {self.h.op})
# If we do inclusive=False, the result is empty.
ops = gde.select.get_backward_walk_ops(
seed_ops,
inclusive=False,
within_ops=within_ops,
within_ops_fn=within_ops_fn,
stop_at_ts=stop_at_ts)
self.assertEqual(set(ops), set())
# Removing stop_at_fs adds f.op, d.op.
ops = gde.select.get_backward_walk_ops(
seed_ops,
inclusive=True,
within_ops=within_ops,
within_ops_fn=within_ops_fn)
self.assertEqual(set(ops), {self.d.op, self.f.op, self.h.op})
# Not using within_ops_fn adds back ops for a, b, c.
ops = gde.select.get_backward_walk_ops(
seed_ops, inclusive=True, within_ops=within_ops)
self.assertEqual(
set(ops),
{self.a.op, self.b.op, self.c.op, self.d.op, self.f.op, self.h.op})
    # Vanilla backward search via self.h.op includes everything except e.op.
ops = gde.select.get_backward_walk_ops(seed_ops, inclusive=True)
self.assertEqual(
set(ops),
{self.a.op, self.b.op, self.c.op, self.d.op, self.f.op, self.g.op,
self.h.op})
if __name__ == "__main__":
unittest.main()
| 5,292
| 5,418
| 23
|