| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
from .logic import LogicAdapter


class ClosestMatchAdapter(LogicAdapter):

    def get(self, text, list_of_statements):
        """
        Takes a statement string and a list of statement strings.
        Returns the closest matching statement from the list.
        """
        from fuzzywuzzy import process

        # If the list is empty, return the statement
        if not list_of_statements:
            return text

        # Check if an exact match exists
        if text in list_of_statements:
            return text

        # Get the closest matching statement from the database
        return process.extract(text, list_of_statements, limit=1)[0][0]
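# Note (illustrative, not part of the original adapter): ``process.extract``
# returns a list of ``(choice, score)`` tuples, which is why ``[0][0]`` above
# selects the text of the single best match. A rough sketch of the call,
# assuming fuzzywuzzy is installed:
#
#     >>> from fuzzywuzzy import process
#     >>> process.extract("Hi there", ["Hello there", "Goodbye"], limit=1)
#     [('Hello there', <score>)]   # score is an integer in the 0-100 range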
| karajrish/ChatterBot | chatterbot/adapters/logic/closest_match.py | Python | bsd-3-clause | 666 |
"""Array printing function
$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
"""
__all__ = ["array2string", "array_str", "array_repr", "set_string_function",
"set_printoptions", "get_printoptions", "printoptions",
"format_float_positional", "format_float_scientific"]
__docformat__ = 'restructuredtext'
#
# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca>
# last revision: 1996-3-13
# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
# and by Perry Greenfield 2000-4-1 for numarray
# and by Travis Oliphant 2005-8-22 for numpy
# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy
# scalars but for different purposes. scalartypes.c.src has str/reprs for when
# the scalar is printed on its own, while arrayprint.py has strs for when
# scalars are printed inside an ndarray. Only the latter strs are currently
# user-customizable.
import functools
import numbers
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
import numpy as np
from . import numerictypes as _nt
from .umath import absolute, isinf, isfinite, isnat
from . import multiarray
from .multiarray import (array, dragon4_positional, dragon4_scientific,
datetime_as_string, datetime_data, ndarray,
set_legacy_print_mode)
from .fromnumeric import any
from .numeric import concatenate, asarray, errstate
from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
flexible)
from .overrides import array_function_dispatch, set_module
import operator
import warnings
import contextlib
_format_options = {
'edgeitems': 3, # repr N leading and trailing items of each dimension
'threshold': 1000, # total items > triggers array summarization
'floatmode': 'maxprec',
'precision': 8, # precision of floating point representations
'suppress': False, # suppress printing small floating values in exp format
'linewidth': 75,
'nanstr': 'nan',
'infstr': 'inf',
'sign': '-',
'formatter': None,
'legacy': False}
def _make_options_dict(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None,
sign=None, formatter=None, floatmode=None, legacy=None):
""" make a dictionary out of the non-None arguments, plus sanity checks """
options = {k: v for k, v in locals().items() if v is not None}
if suppress is not None:
options['suppress'] = bool(suppress)
modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal']
if floatmode not in modes + [None]:
raise ValueError("floatmode option must be one of " +
", ".join('"{}"'.format(m) for m in modes))
if sign not in [None, '-', '+', ' ']:
raise ValueError("sign option must be one of ' ', '+', or '-'")
if legacy not in [None, False, '1.13']:
warnings.warn("legacy printing option can currently only be '1.13' or "
"`False`", stacklevel=3)
if threshold is not None:
# forbid the bad threshold arg suggested by stack overflow, gh-12351
if not isinstance(threshold, numbers.Number):
raise TypeError("threshold must be numeric")
if np.isnan(threshold):
raise ValueError("threshold must be non-NAN, try "
"sys.maxsize for untruncated representation")
if precision is not None:
# forbid the bad precision arg as suggested by issue #18254
try:
options['precision'] = operator.index(precision)
except TypeError as e:
raise TypeError('precision must be an integer') from e
return options
@set_module('numpy')
def set_printoptions(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None,
formatter=None, sign=None, floatmode=None, *, legacy=None):
"""
Set printing options.
These options determine the way floating point numbers, arrays and
other NumPy objects are displayed.
Parameters
----------
precision : int or None, optional
Number of digits of precision for floating point output (default 8).
May be None if `floatmode` is not `fixed`, to print as many digits as
necessary to uniquely specify the value.
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr (default 1000).
To always use the full repr without summarization, pass `sys.maxsize`.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension (default 3).
linewidth : int, optional
The number of characters per line for the purpose of inserting
line breaks (default 75).
suppress : bool, optional
If True, always print floating point numbers using fixed point
notation, in which case numbers equal to zero in the current precision
will print as zero. If False, then scientific notation is used when
absolute value of the smallest number is < 1e-4 or the ratio of the
maximum absolute value to the minimum is > 1e3. The default is False.
nanstr : str, optional
String representation of floating point not-a-number (default nan).
infstr : str, optional
String representation of floating point infinity (default inf).
sign : string, either '-', '+', or ' ', optional
Controls printing of the sign of floating-point types. If '+', always
print the sign of positive values. If ' ', always prints a space
(whitespace character) in the sign position of positive values. If
'-', omit the sign character of positive values. (default '-')
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are:
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
- 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- 'object' : `np.object_` arrays
Other keys that can be used to set a group of types at once are:
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'numpystr'
floatmode : str, optional
Controls the interpretation of the `precision` option for
floating-point types. Can take the following values
(default maxprec_equal):
* 'fixed': Always print exactly `precision` fractional digits,
even if this would print more or fewer digits than
necessary to specify the value uniquely.
* 'unique': Print the minimum number of fractional digits necessary
to represent each value uniquely. Different elements may
have a different number of digits. The value of the
`precision` option is ignored.
* 'maxprec': Print at most `precision` fractional digits, but if
an element can be uniquely represented with fewer digits
only print it with that many.
* 'maxprec_equal': Print at most `precision` fractional digits,
but if every element in the array can be uniquely
represented with an equal number of fewer digits, use that
many digits for all elements.
legacy : string or `False`, optional
If set to the string `'1.13'` enables 1.13 legacy printing mode. This
approximates numpy 1.13 print output by including a space in the sign
position of floats and different behavior for 0d arrays. If set to
`False`, disables legacy mode. Unrecognized strings will be ignored
with a warning for forward compatibility.
.. versionadded:: 1.14.0
See Also
--------
get_printoptions, printoptions, set_string_function, array2string
Notes
-----
`formatter` is always reset with a call to `set_printoptions`.
Use `printoptions` as a context manager to set the values temporarily.
Examples
--------
Floating point precision can be set:
>>> np.set_printoptions(precision=4)
>>> np.array([1.123456789])
array([1.1235])
Long arrays can be summarised:
>>> np.set_printoptions(threshold=5)
>>> np.arange(10)
array([0, 1, 2, ..., 7, 8, 9])
Small results can be suppressed:
>>> eps = np.finfo(float).eps
>>> x = np.arange(4.)
>>> x**2 - (x + eps)**2
array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
>>> np.set_printoptions(suppress=True)
>>> x**2 - (x + eps)**2
array([-0., -0., 0., 0.])
A custom formatter can be used to display array elements as desired:
>>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
>>> x = np.arange(3)
>>> x
array([int: 0, int: -1, int: -2])
>>> np.set_printoptions() # formatter gets reset
>>> x
array([0, 1, 2])
To put back the default options, you can use:
>>> np.set_printoptions(edgeitems=3, infstr='inf',
... linewidth=75, nanstr='nan', precision=8,
... suppress=False, threshold=1000, formatter=None)
Also to temporarily override options, use `printoptions` as a context manager:
>>> with np.printoptions(precision=2, suppress=True, threshold=5):
... np.linspace(0, 10, 10)
array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ])
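The ``sign`` option adds an explicit sign to positive values (output shown
here is representative):
>>> np.set_printoptions(sign='+')
>>> np.array([1.0, -1.0])
array([+1., -1.])
>>> np.set_printoptions(sign='-')  # restore the default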
"""
opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
suppress, nanstr, infstr, sign, formatter,
floatmode, legacy)
# formatter is always reset
opt['formatter'] = formatter
_format_options.update(opt)
# set the C variable for legacy mode
if _format_options['legacy'] == '1.13':
set_legacy_print_mode(113)
# reset the sign option in legacy mode to avoid confusion
_format_options['sign'] = '-'
elif _format_options['legacy'] is False:
set_legacy_print_mode(0)
@set_module('numpy')
def get_printoptions():
"""
Return the current print options.
Returns
-------
print_opts : dict
Dictionary of current print options with keys
- precision : int
- threshold : int
- edgeitems : int
- linewidth : int
- suppress : bool
- nanstr : str
- infstr : str
- formatter : dict of callables
- sign : str
For a full description of these options, see `set_printoptions`.
See Also
--------
set_printoptions, printoptions, set_string_function
"""
return _format_options.copy()
@set_module('numpy')
@contextlib.contextmanager
def printoptions(*args, **kwargs):
"""Context manager for setting print options.
Set print options for the scope of the `with` block, and restore the old
options at the end. See `set_printoptions` for the full description of
available options.
Examples
--------
>>> from numpy.testing import assert_equal
>>> with np.printoptions(precision=2):
... np.array([2.0]) / 3
array([0.67])
The `as`-clause of the `with`-statement gives the current print options:
>>> with np.printoptions(precision=2) as opts:
... assert_equal(opts, np.get_printoptions())
See Also
--------
set_printoptions, get_printoptions
"""
opts = np.get_printoptions()
try:
np.set_printoptions(*args, **kwargs)
yield np.get_printoptions()
finally:
np.set_printoptions(**opts)
def _leading_trailing(a, edgeitems, index=()):
"""
Keep only the N-D corners (leading and trailing edges) of an array.
Should be passed a base-class ndarray, since it makes no guarantees about
preserving subclasses.
"""
axis = len(index)
if axis == a.ndim:
return a[index]
if a.shape[axis] > 2*edgeitems:
return concatenate((
_leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
_leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
), axis=axis)
else:
return _leading_trailing(a, edgeitems, index + np.index_exp[:])
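# Illustrative sketch (added note): for a 1-D array longer than 2*edgeitems
# only the corners are kept, e.g.
#     _leading_trailing(np.arange(10), 2)   # -> array([0, 1, 8, 9])
# while axes that are short enough are returned unchanged.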
def _object_format(o):
""" Object arrays containing lists should be printed unambiguously """
if type(o) is list:
fmt = 'list({!r})'
else:
fmt = '{!r}'
return fmt.format(o)
def repr_format(x):
return repr(x)
def str_format(x):
return str(x)
def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy,
formatter, **kwargs):
# note: extra arguments in kwargs are ignored
# wrapped in lambdas to avoid taking a code path with the wrong type of data
formatdict = {
'bool': lambda: BoolFormat(data),
'int': lambda: IntegerFormat(data),
'float': lambda: FloatingFormat(
data, precision, floatmode, suppress, sign, legacy=legacy),
'longfloat': lambda: FloatingFormat(
data, precision, floatmode, suppress, sign, legacy=legacy),
'complexfloat': lambda: ComplexFloatingFormat(
data, precision, floatmode, suppress, sign, legacy=legacy),
'longcomplexfloat': lambda: ComplexFloatingFormat(
data, precision, floatmode, suppress, sign, legacy=legacy),
'datetime': lambda: DatetimeFormat(data, legacy=legacy),
'timedelta': lambda: TimedeltaFormat(data),
'object': lambda: _object_format,
'void': lambda: str_format,
'numpystr': lambda: repr_format}
# we need to wrap values in `formatter` in a lambda, so that the interface
# is the same as the above values.
def indirect(x):
return lambda: x
if formatter is not None:
fkeys = [k for k in formatter.keys() if formatter[k] is not None]
if 'all' in fkeys:
for key in formatdict.keys():
formatdict[key] = indirect(formatter['all'])
if 'int_kind' in fkeys:
for key in ['int']:
formatdict[key] = indirect(formatter['int_kind'])
if 'float_kind' in fkeys:
for key in ['float', 'longfloat']:
formatdict[key] = indirect(formatter['float_kind'])
if 'complex_kind' in fkeys:
for key in ['complexfloat', 'longcomplexfloat']:
formatdict[key] = indirect(formatter['complex_kind'])
if 'str_kind' in fkeys:
formatdict['numpystr'] = indirect(formatter['str_kind'])
for key in formatdict.keys():
if key in fkeys:
formatdict[key] = indirect(formatter[key])
return formatdict
def _get_format_function(data, **options):
"""
find the right formatting function for the dtype_
"""
dtype_ = data.dtype
dtypeobj = dtype_.type
formatdict = _get_formatdict(data, **options)
if issubclass(dtypeobj, _nt.bool_):
return formatdict['bool']()
elif issubclass(dtypeobj, _nt.integer):
if issubclass(dtypeobj, _nt.timedelta64):
return formatdict['timedelta']()
else:
return formatdict['int']()
elif issubclass(dtypeobj, _nt.floating):
if issubclass(dtypeobj, _nt.longfloat):
return formatdict['longfloat']()
else:
return formatdict['float']()
elif issubclass(dtypeobj, _nt.complexfloating):
if issubclass(dtypeobj, _nt.clongfloat):
return formatdict['longcomplexfloat']()
else:
return formatdict['complexfloat']()
elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
return formatdict['numpystr']()
elif issubclass(dtypeobj, _nt.datetime64):
return formatdict['datetime']()
elif issubclass(dtypeobj, _nt.object_):
return formatdict['object']()
elif issubclass(dtypeobj, _nt.void):
if dtype_.names is not None:
return StructuredVoidFormat.from_data(data, **options)
else:
return formatdict['void']()
else:
return formatdict['numpystr']()
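# Illustrative sketch (added note): this dispatch is driven purely by dtype,
# e.g. with the module-level defaults
#     _get_format_function(np.array([1.5]), **_format_options)    # -> FloatingFormat
#     _get_format_function(np.array(['ab']), **_format_options)   # -> repr_format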
def _recursive_guard(fillvalue='...'):
"""
Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs
Decorates a function such that if it calls itself with the same first
argument, it returns `fillvalue` instead of recursing.
Largely copied from reprlib.recursive_repr
"""
def decorating_function(f):
repr_running = set()
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
return f(self, *args, **kwargs)
finally:
repr_running.discard(key)
return wrapper
return decorating_function
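# Illustrative sketch (added note): the guard is what keeps self-referential
# object arrays printable, e.g.
#     a = np.empty(1, dtype=object)
#     a[0] = a        # the array now contains itself
#     repr(a)         # the inner reference is rendered as '...' instead of recursing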
# gracefully handle recursive calls, when object arrays contain themselves
@_recursive_guard()
def _array2string(a, options, separator=' ', prefix=""):
# The formatter __init__s in _get_format_function cannot deal with
# subclasses yet, and we also need to avoid recursion issues in
# _formatArray with subclasses which return 0d arrays in place of scalars
data = asarray(a)
if a.shape == ():
a = data
if a.size > options['threshold']:
summary_insert = "..."
data = _leading_trailing(data, options['edgeitems'])
else:
summary_insert = ""
# find the right formatting function for the array
format_function = _get_format_function(data, **options)
# skip over "["
next_line_prefix = " "
# skip over array(
next_line_prefix += " "*len(prefix)
lst = _formatArray(a, format_function, options['linewidth'],
next_line_prefix, separator, options['edgeitems'],
summary_insert, options['legacy'])
return lst
def _array2string_dispatcher(
a, max_line_width=None, precision=None,
suppress_small=None, separator=None, prefix=None,
style=None, formatter=None, threshold=None,
edgeitems=None, sign=None, floatmode=None, suffix=None,
*, legacy=None):
return (a,)
@array_function_dispatch(_array2string_dispatcher, module='numpy')
def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
style=np._NoValue, formatter=None, threshold=None,
edgeitems=None, sign=None, floatmode=None, suffix="",
*, legacy=None):
"""
Return a string representation of an array.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`.
Defaults to ``numpy.get_printoptions()['linewidth']``.
precision : int or None, optional
Floating point precision.
Defaults to ``numpy.get_printoptions()['precision']``.
suppress_small : bool, optional
Represent numbers "very close" to zero as zero; default is False.
Very close is defined by precision: if the precision is 8, e.g.,
numbers smaller (in absolute value) than 5e-9 are represented as
zero.
Defaults to ``numpy.get_printoptions()['suppress']``.
separator : str, optional
Inserted between elements.
prefix : str, optional
suffix : str, optional
The length of the prefix and suffix strings are used to respectively
align and wrap the output. An array is typically printed as::
prefix + array2string(a) + suffix
The output is left-padded by the length of the prefix string, and
wrapping is forced at the column ``max_line_width - len(suffix)``.
It should be noted that the content of prefix and suffix strings are
not included in the output.
style : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.14.0
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are:
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
- 'void' : type `numpy.void`
- 'numpystr' : types `numpy.string_` and `numpy.unicode_`
Other keys that can be used to set a group of types at once are:
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'numpystr'
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr.
Defaults to ``numpy.get_printoptions()['threshold']``.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension.
Defaults to ``numpy.get_printoptions()['edgeitems']``.
sign : string, either '-', '+', or ' ', optional
Controls printing of the sign of floating-point types. If '+', always
print the sign of positive values. If ' ', always prints a space
(whitespace character) in the sign position of positive values. If
'-', omit the sign character of positive values.
Defaults to ``numpy.get_printoptions()['sign']``.
floatmode : str, optional
Controls the interpretation of the `precision` option for
floating-point types.
Defaults to ``numpy.get_printoptions()['floatmode']``.
Can take the following values:
- 'fixed': Always print exactly `precision` fractional digits,
even if this would print more or fewer digits than
necessary to specify the value uniquely.
- 'unique': Print the minimum number of fractional digits necessary
to represent each value uniquely. Different elements may
have a different number of digits. The value of the
`precision` option is ignored.
- 'maxprec': Print at most `precision` fractional digits, but if
an element can be uniquely represented with fewer digits
only print it with that many.
- 'maxprec_equal': Print at most `precision` fractional digits,
but if every element in the array can be uniquely
represented with an equal number of fewer digits, use that
many digits for all elements.
legacy : string or `False`, optional
If set to the string `'1.13'` enables 1.13 legacy printing mode. This
approximates numpy 1.13 print output by including a space in the sign
position of floats and different behavior for 0d arrays. If set to
`False`, disables legacy mode. Unrecognized strings will be ignored
with a warning for forward compatibility.
.. versionadded:: 1.14.0
Returns
-------
array_str : str
String representation of the array.
Raises
------
TypeError
if a callable in `formatter` does not return a string.
See Also
--------
array_str, array_repr, set_printoptions, get_printoptions
Notes
-----
If a formatter is specified for a certain type, the `precision` keyword is
ignored for that type.
This is a very flexible function; `array_repr` and `array_str` are using
`array2string` internally so keywords with the same name should work
identically in all three functions.
Examples
--------
>>> x = np.array([1e-16,1,2,3])
>>> np.array2string(x, precision=2, separator=',',
... suppress_small=True)
'[0.,1.,2.,3.]'
>>> x = np.arange(3.)
>>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
'[0.00 1.00 2.00]'
>>> x = np.arange(3)
>>> np.array2string(x, formatter={'int':lambda x: hex(x)})
'[0x0 0x1 0x2]'
"""
overrides = _make_options_dict(precision, threshold, edgeitems,
max_line_width, suppress_small, None, None,
sign, formatter, floatmode, legacy)
options = _format_options.copy()
options.update(overrides)
if options['legacy'] == '1.13':
if style is np._NoValue:
style = repr
if a.shape == () and a.dtype.names is None:
return style(a.item())
elif style is not np._NoValue:
# Deprecation 11-9-2017 v1.14
warnings.warn("'style' argument is deprecated and no longer functional"
" except in 1.13 'legacy' mode",
DeprecationWarning, stacklevel=3)
if options['legacy'] != '1.13':
options['linewidth'] -= len(suffix)
# treat as a null array if any of shape elements == 0
if a.size == 0:
return "[]"
return _array2string(a, options, separator, prefix)
def _extendLine(s, line, word, line_width, next_line_prefix, legacy):
needs_wrap = len(line) + len(word) > line_width
if legacy != '1.13':
# don't wrap lines if it won't help
if len(line) <= len(next_line_prefix):
needs_wrap = False
if needs_wrap:
s += line.rstrip() + "\n"
line = next_line_prefix
line += word
return s, line
def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy):
"""
Extends line with nicely formatted (possibly multi-line) string ``word``.
"""
words = word.splitlines()
if len(words) == 1 or legacy == '1.13':
return _extendLine(s, line, word, line_width, next_line_prefix, legacy)
max_word_length = max(len(word) for word in words)
if (len(line) + max_word_length > line_width and
len(line) > len(next_line_prefix)):
s += line.rstrip() + '\n'
line = next_line_prefix + words[0]
indent = next_line_prefix
else:
indent = len(line)*' '
line += words[0]
for word in words[1::]:
s += line.rstrip() + '\n'
line = indent + word
suffix_length = max_word_length - len(words[-1])
line += suffix_length*' '
return s, line
def _formatArray(a, format_function, line_width, next_line_prefix,
separator, edge_items, summary_insert, legacy):
"""formatArray is designed for two modes of operation:
1. Full output
2. Summarized output
"""
def recurser(index, hanging_indent, curr_width):
"""
By using this local function, we don't need to recurse with all the
arguments. Since this function is not created recursively, the cost is
not significant
"""
axis = len(index)
axes_left = a.ndim - axis
if axes_left == 0:
return format_function(a[index])
# when recursing, add a space to align with the [ added, and reduce the
# length of the line by 1
next_hanging_indent = hanging_indent + ' '
if legacy == '1.13':
next_width = curr_width
else:
next_width = curr_width - len(']')
a_len = a.shape[axis]
show_summary = summary_insert and 2*edge_items < a_len
if show_summary:
leading_items = edge_items
trailing_items = edge_items
else:
leading_items = 0
trailing_items = a_len
# stringify the array with the hanging indent on the first line too
s = ''
# last axis (rows) - wrap elements if they would not fit on one line
if axes_left == 1:
# the length up until the beginning of the separator / bracket
if legacy == '1.13':
elem_width = curr_width - len(separator.rstrip())
else:
elem_width = curr_width - max(len(separator.rstrip()), len(']'))
line = hanging_indent
for i in range(leading_items):
word = recurser(index + (i,), next_hanging_indent, next_width)
s, line = _extendLine_pretty(
s, line, word, elem_width, hanging_indent, legacy)
line += separator
if show_summary:
s, line = _extendLine(
s, line, summary_insert, elem_width, hanging_indent, legacy)
if legacy == '1.13':
line += ", "
else:
line += separator
for i in range(trailing_items, 1, -1):
word = recurser(index + (-i,), next_hanging_indent, next_width)
s, line = _extendLine_pretty(
s, line, word, elem_width, hanging_indent, legacy)
line += separator
if legacy == '1.13':
# width of the separator is not considered on 1.13
elem_width = curr_width
word = recurser(index + (-1,), next_hanging_indent, next_width)
s, line = _extendLine_pretty(
s, line, word, elem_width, hanging_indent, legacy)
s += line
# other axes - insert newlines between rows
else:
s = ''
line_sep = separator.rstrip() + '\n'*(axes_left - 1)
for i in range(leading_items):
nested = recurser(index + (i,), next_hanging_indent, next_width)
s += hanging_indent + nested + line_sep
if show_summary:
if legacy == '1.13':
# trailing space, fixed nbr of newlines, and fixed separator
s += hanging_indent + summary_insert + ", \n"
else:
s += hanging_indent + summary_insert + line_sep
for i in range(trailing_items, 1, -1):
nested = recurser(index + (-i,), next_hanging_indent,
next_width)
s += hanging_indent + nested + line_sep
nested = recurser(index + (-1,), next_hanging_indent, next_width)
s += hanging_indent + nested
# remove the hanging indent, and wrap in []
s = '[' + s[len(hanging_indent):] + ']'
return s
try:
# invoke the recursive part with an initial index and prefix
return recurser(index=(),
hanging_indent=next_line_prefix,
curr_width=line_width)
finally:
# recursive closures have a cyclic reference to themselves, which
# requires gc to collect (gh-10620). To avoid this problem, for
# performance and PyPy friendliness, we break the cycle:
recurser = None
def _none_or_positive_arg(x, name):
if x is None:
return -1
if x < 0:
raise ValueError("{} must be >= 0".format(name))
return x
class FloatingFormat:
""" Formatter for subtypes of np.floating """
def __init__(self, data, precision, floatmode, suppress_small, sign=False,
*, legacy=None):
# for backcompatibility, accept bools
if isinstance(sign, bool):
sign = '+' if sign else '-'
self._legacy = legacy
if self._legacy == '1.13':
# when not 0d, legacy does not support '-'
if data.shape != () and sign == '-':
sign = ' '
self.floatmode = floatmode
if floatmode == 'unique':
self.precision = None
else:
self.precision = precision
self.precision = _none_or_positive_arg(self.precision, 'precision')
self.suppress_small = suppress_small
self.sign = sign
self.exp_format = False
self.large_exponent = False
self.fillFormat(data)
def fillFormat(self, data):
# only the finite values are used to compute the number of digits
finite_vals = data[isfinite(data)]
# choose exponential mode based on the non-zero finite values:
abs_non_zero = absolute(finite_vals[finite_vals != 0])
if len(abs_non_zero) != 0:
max_val = np.max(abs_non_zero)
min_val = np.min(abs_non_zero)
with errstate(over='ignore'): # division can overflow
if max_val >= 1.e8 or (not self.suppress_small and
(min_val < 0.0001 or max_val/min_val > 1000.)):
self.exp_format = True
# do a first pass of printing all the numbers, to determine sizes
if len(finite_vals) == 0:
self.pad_left = 0
self.pad_right = 0
self.trim = '.'
self.exp_size = -1
self.unique = True
self.min_digits = None
elif self.exp_format:
trim, unique = '.', True
if self.floatmode == 'fixed' or self._legacy == '1.13':
trim, unique = 'k', False
strs = (dragon4_scientific(x, precision=self.precision,
unique=unique, trim=trim, sign=self.sign == '+')
for x in finite_vals)
frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs))
int_part, frac_part = zip(*(s.split('.') for s in frac_strs))
self.exp_size = max(len(s) for s in exp_strs) - 1
self.trim = 'k'
self.precision = max(len(s) for s in frac_part)
self.min_digits = self.precision
self.unique = unique
# for back-compat with np 1.13, use 2 spaces & sign and full prec
if self._legacy == '1.13':
self.pad_left = 3
else:
# this should be only 1 or 2. Can be calculated from sign.
self.pad_left = max(len(s) for s in int_part)
# pad_right is only needed for nan length calculation
self.pad_right = self.exp_size + 2 + self.precision
else:
trim, unique = '.', True
if self.floatmode == 'fixed':
trim, unique = 'k', False
strs = (dragon4_positional(x, precision=self.precision,
fractional=True,
unique=unique, trim=trim,
sign=self.sign == '+')
for x in finite_vals)
int_part, frac_part = zip(*(s.split('.') for s in strs))
if self._legacy == '1.13':
self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part)
else:
self.pad_left = max(len(s) for s in int_part)
self.pad_right = max(len(s) for s in frac_part)
self.exp_size = -1
self.unique = unique
if self.floatmode in ['fixed', 'maxprec_equal']:
self.precision = self.min_digits = self.pad_right
self.trim = 'k'
else:
self.trim = '.'
self.min_digits = 0
if self._legacy != '1.13':
# account for sign = ' ' by adding one to pad_left
if self.sign == ' ' and not any(np.signbit(finite_vals)):
self.pad_left += 1
# if there are non-finite values, may need to increase pad_left
if data.size != finite_vals.size:
neginf = self.sign != '-' or any(data[isinf(data)] < 0)
nanlen = len(_format_options['nanstr'])
inflen = len(_format_options['infstr']) + neginf
offset = self.pad_right + 1 # +1 for decimal pt
self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset)
def __call__(self, x):
if not np.isfinite(x):
with errstate(invalid='ignore'):
if np.isnan(x):
sign = '+' if self.sign == '+' else ''
ret = sign + _format_options['nanstr']
else: # isinf
sign = '-' if x < 0 else '+' if self.sign == '+' else ''
ret = sign + _format_options['infstr']
return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret
if self.exp_format:
return dragon4_scientific(x,
precision=self.precision,
min_digits=self.min_digits,
unique=self.unique,
trim=self.trim,
sign=self.sign == '+',
pad_left=self.pad_left,
exp_digits=self.exp_size)
else:
return dragon4_positional(x,
precision=self.precision,
min_digits=self.min_digits,
unique=self.unique,
fractional=True,
trim=self.trim,
sign=self.sign == '+',
pad_left=self.pad_left,
pad_right=self.pad_right)
@set_module('numpy')
def format_float_scientific(x, precision=None, unique=True, trim='k',
sign=False, pad_left=None, exp_digits=None,
min_digits=None):
"""
Format a floating-point scalar as a decimal string in scientific notation.
Provides control over rounding, trimming and padding. Uses and assumes
IEEE unbiased rounding. Uses the "Dragon4" algorithm.
Parameters
----------
x : python float or numpy floating scalar
Value to format.
precision : non-negative integer or None, optional
Maximum number of digits to print. May be None if `unique` is
`True`, but must be an integer if unique is `False`.
unique : boolean, optional
If `True`, use a digit-generation strategy which gives the shortest
representation which uniquely identifies the floating-point number from
other values of the same type, by judicious rounding. If `precision`
is given, fewer digits than necessary can be printed. If `min_digits`
is given, more can be printed, in which case the last digit is rounded
with unbiased rounding.
If `False`, digits are generated as if printing an infinite-precision
value and stopping after `precision` digits, rounding the remaining
value with unbiased rounding.
trim : one of 'k', '.', '0', '-', optional
Controls post-processing trimming of trailing digits, as follows:
* 'k' : keep trailing zeros, keep decimal point (no trimming)
* '.' : trim all trailing zeros, leave decimal point
* '0' : trim all but the zero before the decimal point. Insert the
zero if it is missing.
* '-' : trim trailing zeros and any trailing decimal point
sign : boolean, optional
Whether to show the sign for positive values.
pad_left : non-negative integer, optional
Pad the left side of the string with whitespace until at least that
many characters are to the left of the decimal point.
exp_digits : non-negative integer, optional
Pad the exponent with zeros until it contains at least this many digits.
If omitted, the exponent will be at least 2 digits.
min_digits : non-negative integer or None, optional
Minimum number of digits to print. This only has an effect for
`unique=True`. In that case more digits than necessary to uniquely
identify the value may be printed and rounded unbiased.
.. versionadded:: 1.21.0
Returns
-------
rep : string
The string representation of the floating point value
See Also
--------
format_float_positional
Examples
--------
>>> np.format_float_scientific(np.float32(np.pi))
'3.1415927e+00'
>>> s = np.float32(1.23e24)
>>> np.format_float_scientific(s, unique=False, precision=15)
'1.230000071797338e+24'
>>> np.format_float_scientific(s, exp_digits=4)
'1.23e+0024'
"""
precision = _none_or_positive_arg(precision, 'precision')
pad_left = _none_or_positive_arg(pad_left, 'pad_left')
exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits')
min_digits = _none_or_positive_arg(min_digits, 'min_digits')
if min_digits > 0 and precision > 0 and min_digits > precision:
raise ValueError("min_digits must be less than or equal to precision")
return dragon4_scientific(x, precision=precision, unique=unique,
trim=trim, sign=sign, pad_left=pad_left,
exp_digits=exp_digits, min_digits=min_digits)
@set_module('numpy')
def format_float_positional(x, precision=None, unique=True,
fractional=True, trim='k', sign=False,
pad_left=None, pad_right=None, min_digits=None):
"""
Format a floating-point scalar as a decimal string in positional notation.
Provides control over rounding, trimming and padding. Uses and assumes
IEEE unbiased rounding. Uses the "Dragon4" algorithm.
Parameters
----------
x : python float or numpy floating scalar
Value to format.
precision : non-negative integer or None, optional
Maximum number of digits to print. May be None if `unique` is
`True`, but must be an integer if unique is `False`.
unique : boolean, optional
If `True`, use a digit-generation strategy which gives the shortest
representation which uniquely identifies the floating-point number from
other values of the same type, by judicious rounding. If `precision`
is given, fewer digits than necessary can be printed, or if `min_digits`
is given, more can be printed, in which case the last digit is rounded
with unbiased rounding.
If `False`, digits are generated as if printing an infinite-precision
value and stopping after `precision` digits, rounding the remaining
value with unbiased rounding.
fractional : boolean, optional
If `True`, the cutoffs of `precision` and `min_digits` refer to the
total number of digits after the decimal point, including leading
zeros.
If `False`, `precision` and `min_digits` refer to the total number of
significant digits, before or after the decimal point, ignoring leading
zeros.
trim : one of 'k', '.', '0', '-', optional
Controls post-processing trimming of trailing digits, as follows:
* 'k' : keep trailing zeros, keep decimal point (no trimming)
* '.' : trim all trailing zeros, leave decimal point
* '0' : trim all but the zero before the decimal point. Insert the
zero if it is missing.
* '-' : trim trailing zeros and any trailing decimal point
sign : boolean, optional
Whether to show the sign for positive values.
pad_left : non-negative integer, optional
Pad the left side of the string with whitespace until at least that
many characters are to the left of the decimal point.
pad_right : non-negative integer, optional
Pad the right side of the string with whitespace until at least that
many characters are to the right of the decimal point.
min_digits : non-negative integer or None, optional
Minimum number of digits to print. Only has an effect if `unique=True`
in which case additional digits past those necessary to uniquely
identify the value may be printed, rounding the last additional digit.
.. versionadded:: 1.21.0
Returns
-------
rep : string
The string representation of the floating point value
See Also
--------
format_float_scientific
Examples
--------
>>> np.format_float_positional(np.float32(np.pi))
'3.1415927'
>>> np.format_float_positional(np.float16(np.pi))
'3.14'
>>> np.format_float_positional(np.float16(0.3))
'0.3'
>>> np.format_float_positional(np.float16(0.3), unique=False, precision=10)
'0.3000488281'
"""
precision = _none_or_positive_arg(precision, 'precision')
pad_left = _none_or_positive_arg(pad_left, 'pad_left')
pad_right = _none_or_positive_arg(pad_right, 'pad_right')
min_digits = _none_or_positive_arg(min_digits, 'min_digits')
if not fractional and precision == 0:
raise ValueError("precision must be greater than 0 if "
"fractional=False")
if min_digits > 0 and precision > 0 and min_digits > precision:
raise ValueError("min_digits must be less than or equal to precision")
return dragon4_positional(x, precision=precision, unique=unique,
fractional=fractional, trim=trim,
sign=sign, pad_left=pad_left,
pad_right=pad_right, min_digits=min_digits)
class IntegerFormat:
def __init__(self, data):
if data.size > 0:
max_str_len = max(len(str(np.max(data))),
len(str(np.min(data))))
else:
max_str_len = 0
self.format = '%{}d'.format(max_str_len)
def __call__(self, x):
return self.format % x
class BoolFormat:
def __init__(self, data, **kwargs):
# add an extra space so " True" and "False" have the same length and
# array elements align nicely when printed, except in 0d arrays
self.truestr = ' True' if data.shape != () else 'True'
def __call__(self, x):
return self.truestr if x else "False"
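# Illustrative note (added): the leading space makes boolean columns line up,
# e.g. repr(np.array([True, False])) renders as 'array([ True, False])'.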
class ComplexFloatingFormat:
""" Formatter for subtypes of np.complexfloating """
def __init__(self, x, precision, floatmode, suppress_small,
sign=False, *, legacy=None):
# for backcompatibility, accept bools
if isinstance(sign, bool):
sign = '+' if sign else '-'
floatmode_real = floatmode_imag = floatmode
if legacy == '1.13':
floatmode_real = 'maxprec_equal'
floatmode_imag = 'maxprec'
self.real_format = FloatingFormat(
x.real, precision, floatmode_real, suppress_small,
sign=sign, legacy=legacy
)
self.imag_format = FloatingFormat(
x.imag, precision, floatmode_imag, suppress_small,
sign='+', legacy=legacy
)
def __call__(self, x):
r = self.real_format(x.real)
i = self.imag_format(x.imag)
# add the 'j' before the terminal whitespace in i
sp = len(i.rstrip())
i = i[:sp] + 'j' + i[sp:]
return r + i
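# Illustrative note (added): the real and imaginary parts are formatted
# independently and the 'j' is spliced in before any trailing padding, so
# e.g. np.array([1+2j]) is rendered as 'array([1.+2.j])'.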
class _TimelikeFormat:
def __init__(self, data):
non_nat = data[~isnat(data)]
if len(non_nat) > 0:
# Max str length of non-NaT elements
max_str_len = max(len(self._format_non_nat(np.max(non_nat))),
len(self._format_non_nat(np.min(non_nat))))
else:
max_str_len = 0
if len(non_nat) < data.size:
# data contains a NaT
max_str_len = max(max_str_len, 5)
self._format = '%{}s'.format(max_str_len)
self._nat = "'NaT'".rjust(max_str_len)
def _format_non_nat(self, x):
# override in subclass
raise NotImplementedError
def __call__(self, x):
if isnat(x):
return self._nat
else:
return self._format % self._format_non_nat(x)
class DatetimeFormat(_TimelikeFormat):
def __init__(self, x, unit=None, timezone=None, casting='same_kind',
legacy=False):
# Get the unit from the dtype
if unit is None:
if x.dtype.kind == 'M':
unit = datetime_data(x.dtype)[0]
else:
unit = 's'
if timezone is None:
timezone = 'naive'
self.timezone = timezone
self.unit = unit
self.casting = casting
self.legacy = legacy
# must be called after the above are configured
super().__init__(x)
def __call__(self, x):
if self.legacy == '1.13':
return self._format_non_nat(x)
return super().__call__(x)
def _format_non_nat(self, x):
return "'%s'" % datetime_as_string(x,
unit=self.unit,
timezone=self.timezone,
casting=self.casting)
class TimedeltaFormat(_TimelikeFormat):
def _format_non_nat(self, x):
return str(x.astype('i8'))
class SubArrayFormat:
def __init__(self, format_function):
self.format_function = format_function
def __call__(self, arr):
if arr.ndim <= 1:
return "[" + ", ".join(self.format_function(a) for a in arr) + "]"
return "[" + ", ".join(self.__call__(a) for a in arr) + "]"
class StructuredVoidFormat:
"""
Formatter for structured np.void objects.
This does not work on structured alias types like np.dtype(('i4', 'i2,i2')),
as alias scalars lose their field information, and the implementation
relies upon np.void.__getitem__.
"""
def __init__(self, format_functions):
self.format_functions = format_functions
@classmethod
def from_data(cls, data, **options):
"""
This is a second way to initialize StructuredVoidFormat, using the raw data
as input. Added to avoid changing the signature of __init__.
"""
format_functions = []
for field_name in data.dtype.names:
format_function = _get_format_function(data[field_name], **options)
if data.dtype[field_name].shape != ():
format_function = SubArrayFormat(format_function)
format_functions.append(format_function)
return cls(format_functions)
def __call__(self, x):
str_fields = [
format_function(field)
for field, format_function in zip(x, self.format_functions)
]
if len(str_fields) == 1:
return "({},)".format(str_fields[0])
else:
return "({})".format(", ".join(str_fields))
def _void_scalar_repr(x):
"""
Implements the repr for structured-void scalars. It is called from the
scalartypes.c.src code, and is placed here because it uses the elementwise
formatters defined above.
"""
return StructuredVoidFormat.from_data(array(x), **_format_options)(x)
_typelessdata = [int_, float_, complex_, bool_]
if issubclass(intc, int):
_typelessdata.append(intc)
if issubclass(longlong, int):
_typelessdata.append(longlong)
def dtype_is_implied(dtype):
"""
Determine if the given dtype is implied by the representation of its values.
Parameters
----------
dtype : dtype
Data type
Returns
-------
implied : bool
True if the dtype is implied by the representation of its values.
Examples
--------
>>> np.core.arrayprint.dtype_is_implied(int)
True
>>> np.array([1, 2, 3], int)
array([1, 2, 3])
>>> np.core.arrayprint.dtype_is_implied(np.int8)
False
>>> np.array([1, 2, 3], np.int8)
array([1, 2, 3], dtype=int8)
"""
dtype = np.dtype(dtype)
if _format_options['legacy'] == '1.13' and dtype.type == bool_:
return False
# not just void types can be structured, and names are not part of the repr
if dtype.names is not None:
return False
return dtype.type in _typelessdata
def dtype_short_repr(dtype):
"""
Convert a dtype to a short form which evaluates to the same dtype.
The intent is roughly that the following holds
>>> from numpy import *
>>> dt = np.int64([1, 2]).dtype
>>> assert eval(dtype_short_repr(dt)) == dt
"""
if dtype.names is not None:
# structured dtypes give a list or tuple repr
return str(dtype)
elif issubclass(dtype.type, flexible):
# handle these separately so they don't give garbage like str256
return "'%s'" % str(dtype)
typename = dtype.name
# quote typenames which can't be represented as python variable names
if typename and not (typename[0].isalpha() and typename.isalnum()):
typename = repr(typename)
return typename
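# Illustrative sketch (added note): names that are not valid Python
# identifiers are quoted, e.g.
#     dtype_short_repr(np.dtype(np.float64))         # -> 'float64'
#     dtype_short_repr(np.dtype('datetime64[ns]'))   # -> "'datetime64[ns]'"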
def _array_repr_implementation(
arr, max_line_width=None, precision=None, suppress_small=None,
array2string=array2string):
"""Internal version of array_repr() that allows overriding array2string."""
if max_line_width is None:
max_line_width = _format_options['linewidth']
if type(arr) is not ndarray:
class_name = type(arr).__name__
else:
class_name = "array"
skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0
prefix = class_name + "("
suffix = ")" if skipdtype else ","
if (_format_options['legacy'] == '1.13' and
arr.shape == () and not arr.dtype.names):
lst = repr(arr.item())
elif arr.size > 0 or arr.shape == (0,):
lst = array2string(arr, max_line_width, precision, suppress_small,
', ', prefix, suffix=suffix)
else: # show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(arr.shape),)
arr_str = prefix + lst + suffix
if skipdtype:
return arr_str
dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype))
# compute whether we should put dtype on a new line: Do so if adding the
# dtype would extend the last line past max_line_width.
# Note: This line gives the correct result even when rfind returns -1.
last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
spacer = " "
if _format_options['legacy'] == '1.13':
if issubclass(arr.dtype.type, flexible):
spacer = '\n' + ' '*len(class_name + "(")
elif last_line_len + len(dtype_str) + 1 > max_line_width:
spacer = '\n' + ' '*len(class_name + "(")
return arr_str + spacer + dtype_str
def _array_repr_dispatcher(
arr, max_line_width=None, precision=None, suppress_small=None):
return (arr,)
@array_function_dispatch(_array_repr_dispatcher, module='numpy')
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
"""
Return the string representation of an array.
Parameters
----------
arr : ndarray
Input array.
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`.
Defaults to ``numpy.get_printoptions()['linewidth']``.
precision : int, optional
Floating point precision.
Defaults to ``numpy.get_printoptions()['precision']``.
suppress_small : bool, optional
Represent numbers "very close" to zero as zero; default is False.
Very close is defined by precision: if the precision is 8, e.g.,
numbers smaller (in absolute value) than 5e-9 are represented as
zero.
Defaults to ``numpy.get_printoptions()['suppress']``.
Returns
-------
string : str
The string representation of an array.
See Also
--------
array_str, array2string, set_printoptions
Examples
--------
>>> np.array_repr(np.array([1,2]))
'array([1, 2])'
>>> np.array_repr(np.ma.array([0.]))
'MaskedArray([0.])'
>>> np.array_repr(np.array([], np.int32))
'array([], dtype=int32)'
>>> x = np.array([1e-6, 4e-7, 2, 3])
>>> np.array_repr(x, precision=6, suppress_small=True)
'array([0.000001, 0. , 2. , 3. ])'
"""
return _array_repr_implementation(
arr, max_line_width, precision, suppress_small)
@_recursive_guard()
def _guarded_repr_or_str(v):
if isinstance(v, bytes):
return repr(v)
return str(v)
def _array_str_implementation(
a, max_line_width=None, precision=None, suppress_small=None,
array2string=array2string):
"""Internal version of array_str() that allows overriding array2string."""
if (_format_options['legacy'] == '1.13' and
a.shape == () and not a.dtype.names):
return str(a.item())
# the str of 0d arrays is a special case: It should appear like a scalar,
# so floats are not truncated by `precision`, and strings are not wrapped
# in quotes. So we return the str of the scalar value.
if a.shape == ():
# obtain a scalar and call str on it, avoiding problems for subclasses
# for which indexing with () returns a 0d instead of a scalar by using
# ndarray's getitem. Also guard against recursive 0d object arrays.
return _guarded_repr_or_str(np.ndarray.__getitem__(a, ()))
return array2string(a, max_line_width, precision, suppress_small, ' ', "")
def _array_str_dispatcher(
a, max_line_width=None, precision=None, suppress_small=None):
return (a,)
@array_function_dispatch(_array_str_dispatcher, module='numpy')
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
"""
Return a string representation of the data in an array.
The data in the array is returned as a single string. This function is
similar to `array_repr`, the difference being that `array_repr` also
returns information on the kind of array and its data type.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`.
Defaults to ``numpy.get_printoptions()['linewidth']``.
precision : int, optional
Floating point precision.
Defaults to ``numpy.get_printoptions()['precision']``.
suppress_small : bool, optional
Represent numbers "very close" to zero as zero; default is False.
Very close is defined by precision: if the precision is 8, e.g.,
numbers smaller (in absolute value) than 5e-9 are represented as
zero.
Defaults to ``numpy.get_printoptions()['suppress']``.
See Also
--------
array2string, array_repr, set_printoptions
Examples
--------
>>> np.array_str(np.arange(3))
'[0 1 2]'
"""
return _array_str_implementation(
a, max_line_width, precision, suppress_small)
# needed if __array_function__ is disabled
_array2string_impl = getattr(array2string, '__wrapped__', array2string)
_default_array_str = functools.partial(_array_str_implementation,
array2string=_array2string_impl)
_default_array_repr = functools.partial(_array_repr_implementation,
array2string=_array2string_impl)
def set_string_function(f, repr=True):
"""
Set a Python function to be used when pretty printing arrays.
Parameters
----------
f : function or None
Function to be used to pretty print arrays. The function should expect
a single array argument and return a string of the representation of
the array. If None, the function is reset to the default NumPy function
to print arrays.
repr : bool, optional
If True (default), the function for pretty printing (``__repr__``)
is set, if False the function that returns the default string
representation (``__str__``) is set.
See Also
--------
set_printoptions, get_printoptions
Examples
--------
>>> def pprint(arr):
... return 'HA! - What are you going to do now?'
...
>>> np.set_string_function(pprint)
>>> a = np.arange(10)
>>> a
HA! - What are you going to do now?
>>> _ = a
>>> # [0 1 2 3 4 5 6 7 8 9]
We can reset the function to the default:
>>> np.set_string_function(None)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
`repr` affects either pretty printing or normal string representation.
Note that ``__repr__`` is still affected by setting ``__str__``
because the width of each array element in the returned string becomes
equal to the length of the result of ``__str__()``.
>>> x = np.arange(4)
>>> np.set_string_function(lambda x:'random', repr=False)
>>> x.__str__()
'random'
>>> x.__repr__()
'array([0, 1, 2, 3])'
"""
if f is None:
if repr:
return multiarray.set_string_function(_default_array_repr, 1)
else:
return multiarray.set_string_function(_default_array_str, 0)
else:
return multiarray.set_string_function(f, repr)
| pbrod/numpy | numpy/core/arrayprint.py | Python | bsd-3-clause | 61,625 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..extern import six
from ..extern.six.moves import zip
import warnings
import weakref
from copy import deepcopy
import numpy as np
from numpy import ma
from ..units import Unit, Quantity
from ..utils.compat import NUMPY_LT_1_8
from ..utils.console import color_print
from ..utils.metadata import MetaData
from ..utils.data_info import BaseColumnInfo, dtype_info_name
from ..extern.six.moves import range
from . import groups
from . import pprint
from .np_utils import fix_column_name
# These "shims" provide __getitem__ implementations for Column and MaskedColumn
from ._column_mixins import _ColumnGetitemShim, _MaskedColumnGetitemShim
# Create a generic TableFormatter object for use by bare columns with no
# parent table.
FORMATTER = pprint.TableFormatter()
INTEGER_TYPES = (int, long, np.integer) if six.PY2 else (int, np.integer)
class StringTruncateWarning(UserWarning):
"""
Warning class for when a string column is assigned a value
that gets truncated because the base (numpy) string length
is too short.
This does not inherit from AstropyWarning because we want to use
stacklevel=2 to show the user where the issue occurred in their code.
"""
pass
def _auto_names(n_cols):
from . import conf
return [str(conf.auto_colname).format(i) for i in range(n_cols)]
# list of one and two-dimensional comparison functions, which sometimes return
# a Column class and sometimes a plain array. Used in __array_wrap__ to ensure
# they only return plain (masked) arrays (see #1446 and #1685)
_comparison_functions = set(
[np.greater, np.greater_equal, np.less, np.less_equal,
np.not_equal, np.equal,
np.isfinite, np.isinf, np.isnan, np.sign, np.signbit])
def col_copy(col, copy_indices=True):
"""
This is a mixin-safe version of Column.copy() (with copy_data=True).
"""
if isinstance(col, BaseColumn):
return col.copy()
# The new column should have None for the parent_table ref. If the
# original parent_table weakref there at the point of copying then it
# generates an infinite recursion. Instead temporarily remove the weakref
# on the original column and restore after the copy in an exception-safe
# manner.
parent_table = col.info.parent_table
indices = col.info.indices
col.info.parent_table = None
col.info.indices = []
try:
newcol = col.copy() if hasattr(col, 'copy') else deepcopy(col)
newcol.info = col.info
newcol.info.indices = deepcopy(indices or []) if copy_indices else []
for index in newcol.info.indices:
index.replace_col(col, newcol)
finally:
col.info.parent_table = parent_table
col.info.indices = indices
return newcol
class FalseArray(np.ndarray):
def __new__(cls, shape):
obj = np.zeros(shape, dtype=np.bool).view(cls)
return obj
def __setitem__(self, item, val):
val = np.asarray(val)
if np.any(val):
raise ValueError('Cannot set any element of {0} class to True'
.format(self.__class__.__name__))
if six.PY2: # avoid falling back to ndarray.__setslice__
def __setslice__(self, start, stop, val):
self.__setitem__(slice(start, stop), val)
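# Illustrative sketch (added note): FalseArray acts as an all-False boolean
# mask that refuses to be set to True, e.g.
#     fa = FalseArray((3,))
#     fa[0] = False   # fine, nothing becomes True
#     fa[0] = True    # raises ValueError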
class ColumnInfo(BaseColumnInfo):
attrs_from_parent = BaseColumnInfo.attr_names
_supports_indexing = True
class BaseColumn(_ColumnGetitemShim, np.ndarray):
meta = MetaData()
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if data is None:
dtype = (np.dtype(dtype).str, shape)
self_data = np.zeros(length, dtype=dtype)
elif isinstance(data, BaseColumn) and hasattr(data, '_name'):
# When unpickling a MaskedColumn, ``data`` will be a bare
# BaseColumn with none of the expected attributes. In this case
# do NOT execute this block which initializes from ``data``
# attributes.
self_data = np.array(data.data, dtype=dtype, copy=copy)
if description is None:
description = data.description
if unit is None:
unit = unit or data.unit
if format is None:
format = data.format
if meta is None:
meta = deepcopy(data.meta)
if name is None:
name = data.name
elif isinstance(data, Quantity):
if unit is None:
self_data = np.array(data, dtype=dtype, copy=copy)
unit = data.unit
else:
self_data = np.array(data.to(unit), dtype=dtype, copy=copy)
if description is None:
description = data.info.description
if format is None:
format = data.info.format
if meta is None:
meta = deepcopy(data.info.meta)
else:
self_data = np.array(data, dtype=dtype, copy=copy)
self = self_data.view(cls)
self._name = fix_column_name(name)
self.unit = unit
self.format = format
self.description = description
self.meta = meta
self._parent_table = None
self.indices = deepcopy(getattr(data, 'indices', [])) if \
copy_indices else []
for index in self.indices:
index.replace_col(data, self)
return self
@property
def data(self):
return self.view(np.ndarray)
@property
def parent_table(self):
if self._parent_table is None:
return None
else:
return self._parent_table()
@parent_table.setter
def parent_table(self, table):
if table is None:
self._parent_table = None
else:
self._parent_table = weakref.ref(table)
info = ColumnInfo()
def copy(self, order='C', data=None, copy_data=True):
"""
Return a copy of the current instance.
If ``data`` is supplied then a view (reference) of ``data`` is used,
and ``copy_data`` is ignored.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
as possible. (Note that this function and :func:numpy.copy are very
similar, but have different default values for their order=
arguments.) Default is 'C'.
data : array, optional
If supplied then use a view of ``data`` instead of the instance
data. This allows copying the instance attributes and meta.
copy_data : bool, optional
Make a copy of the internal numpy array instead of using a
reference. Default is True.
Returns
-------
col : Column or MaskedColumn
Copy of the current column (same type as original)
"""
if data is None:
data = self.data
if copy_data:
data = data.copy(order)
out = data.view(self.__class__)
out.__array_finalize__(self)
# for MaskedColumn, MaskedArray.__array_finalize__ also copies mask
# from self, which is not the idea here, so undo
if isinstance(self, MaskedColumn):
out._mask = data._mask
self._copy_groups(out)
return out
def __setstate__(self, state):
"""
Restore the internal state of the Column/MaskedColumn for pickling
purposes. This requires that the last element of ``state`` is a
5-tuple that has Column-specific state values.
"""
# Get the Column attributes
names = ('_name', 'unit', 'format', 'description', 'meta', 'indices')
attrs = {name: val for name, val in zip(names, state[-1])}
state = state[:-1]
# Using super(type(self), self).__setstate__() gives an infinite
# recursion. Manually call the right super class to actually set up
# the array object.
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
super_class.__setstate__(self, state)
# Set the Column attributes
for name, val in attrs.items():
setattr(self, name, val)
self._parent_table = None
def __reduce__(self):
"""
Return a 3-tuple for pickling a Column. Use the super-class
functionality but then add in a 5-tuple of Column-specific values
that get used in __setstate__.
"""
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
reconstruct_func, reconstruct_func_args, state = super_class.__reduce__(self)
# Define Column-specific attrs and meta that gets added to state.
column_state = (self.name, self.unit, self.format, self.description,
self.meta, self.indices)
state = state + (column_state,)
return reconstruct_func, reconstruct_func_args, state
# avoid == and != to be done based on type of subclass
# (helped solve #1446; see also __array_wrap__)
def __eq__(self, other):
return self.data.__eq__(other)
def __ne__(self, other):
return self.data.__ne__(other)
def __array_finalize__(self, obj):
# Obj will be none for direct call to Column() creator
if obj is None:
return
if six.callable(super(BaseColumn, self).__array_finalize__):
super(BaseColumn, self).__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
self.parent_table = None
if not hasattr(self, 'indices'): # may have been copied in __new__
self.indices = []
self._copy_attrs(obj)
def __array_wrap__(self, out_arr, context=None):
"""
__array_wrap__ is called at the end of every ufunc.
Normally, we want a Column object back and do not have to do anything
special. But there are two exceptions:
1) If the output shape is different (e.g. for reduction ufuncs
like sum() or mean()), a Column still linking to a parent_table
makes little sense, so we return the output viewed as the
column content (ndarray or MaskedArray).
For this case, we use "[()]" to select everything, and to ensure we
        convert a zero rank array to a scalar. (For some reason np.sum()
        returns a zero rank scalar array while np.mean() returns a scalar;
        so the [()] is needed for this case.)
2) When the output is created by any function that returns a boolean
we also want to consistently return an array rather than a column
(see #1446 and #1685)
"""
out_arr = super(BaseColumn, self).__array_wrap__(out_arr, context)
if (self.shape != out_arr.shape or
(isinstance(out_arr, BaseColumn) and
(context is not None and context[0] in _comparison_functions))):
return out_arr.data[()]
else:
return out_arr
@property
def name(self):
"""
The name of this column.
"""
return self._name
@name.setter
def name(self, val):
val = fix_column_name(val)
if self.parent_table is not None:
table = self.parent_table
table.columns._rename_column(self.name, val)
self._name = val
@property
def descr(self):
"""Array-interface compliant full description of the column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
return (self.name, self.dtype.str, self.shape[1:])
def iter_str_vals(self):
"""
Return an iterator that yields the string-formatted values of this
column.
Returns
-------
str_vals : iterator
Column values formatted as strings
"""
# Iterate over formatted values with no max number of lines, no column
# name, no unit, and ignoring the returned header info in outs.
_pformat_col_iter = self._formatter._pformat_col_iter
for str_val in _pformat_col_iter(self, -1, show_name=False, show_unit=False,
show_dtype=False, outs={}):
yield str_val
def attrs_equal(self, col):
"""Compare the column attributes of ``col`` to this object.
The comparison attributes are: ``name``, ``unit``, ``dtype``,
``format``, ``description``, and ``meta``.
Parameters
----------
col : Column
Comparison column
Returns
-------
equal : boolean
True if all attributes are equal
"""
if not isinstance(col, BaseColumn):
raise ValueError('Comparison `col` must be a Column or '
'MaskedColumn object')
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
equal = all(getattr(self, x) == getattr(col, x) for x in attrs)
return equal
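    # Illustrative sketch (not part of the original source): ``attrs_equal``
    # compares only the column metadata, never the data values, so columns of
    # different lengths can still compare equal, e.g.:
    #
    #     >>> c1 = Column([1, 2], name='a', unit='m')
    #     >>> c2 = Column([3, 4, 5], name='a', unit='m')
    #     >>> c1.attrs_equal(c2)
    #     True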
@property
def _formatter(self):
return FORMATTER if (self.parent_table is None) else self.parent_table.formatter
def pformat(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False,
html=False):
"""Return a list of formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name (default=True)
show_unit : bool
Include a header row for unit (default=False)
show_dtype : bool
Include column dtype (default=False)
html : bool
Format the output as an HTML table (default=False)
Returns
-------
lines : list
List of lines with header and formatted column values
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype,
html=html)
return lines
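    # Illustrative sketch (not part of the original source): ``pformat``
    # returns the formatted rows as a list of strings instead of printing
    # them, which makes it easy to post-process or embed the output, e.g.:
    #
    #     >>> c = Column([1.5, 2.5], name='flux')
    #     >>> for line in c.pformat(max_lines=-1, show_unit=True):
    #     ...     print(line)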
def pprint(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False):
"""Print a formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum number of values in output
show_name : bool
Include column name (default=True)
show_unit : bool
Include a header row for unit (default=False)
show_dtype : bool
            Include column dtype (default=False)
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype)
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
def more(self, max_lines=None, show_name=True, show_unit=False):
"""Interactively browse column with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
show_name : bool
Include a header row for column names (default=True)
show_unit : bool
Include a header row for unit (default=False)
"""
_more_tabcol = self._formatter._more_tabcol
_more_tabcol(self, max_lines=max_lines, show_name=show_name,
show_unit=show_unit)
@property
def unit(self):
"""
The unit associated with this column. May be a string or a
`astropy.units.UnitBase` instance.
Setting the ``unit`` property does not change the values of the
data. To perform a unit conversion, use ``convert_unit_to``.
"""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
else:
self._unit = Unit(unit, parse_strict='silent')
@unit.deleter
def unit(self):
self._unit = None
def convert_unit_to(self, new_unit, equivalencies=[]):
"""
Converts the values of the column in-place from the current
unit to the given unit.
To change the unit associated with this column without
actually changing the data values, simply set the ``unit``
property.
Parameters
----------
new_unit : str or `astropy.units.UnitBase` instance
The unit to convert to.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the unit are not
directly convertible. See :ref:`unit_equivalencies`.
Raises
------
astropy.units.UnitsError
If units are inconsistent
"""
if self.unit is None:
raise ValueError("No unit set on column")
self.data[:] = self.unit.to(
new_unit, self.data, equivalencies=equivalencies)
self.unit = new_unit
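    # Illustrative sketch (not part of the original source): unlike assigning
    # to the ``unit`` property, ``convert_unit_to`` rescales the stored values
    # in place, e.g.:
    #
    #     >>> c = Column([100.0, 200.0], name='d', unit='cm')
    #     >>> c.convert_unit_to('m')
    #     >>> list(c.data), str(c.unit)
    #     ([1.0, 2.0], 'm')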
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.ColumnGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this column by the specified ``keys``
This effectively splits the column into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`Column` or `MaskedColumn` which contains a copy of this column but
sorted by row according to ``keys``.
The ``keys`` input to ``group_by`` must be a numpy array with the
same length as this column.
Parameters
----------
keys : numpy array
Key grouping object
Returns
-------
out : Column
New column with groups attribute set accordingly
"""
return groups.column_group_by(self, keys)
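    # Illustrative sketch (not part of the original source): ``group_by``
    # returns a sorted copy whose ``groups`` attribute records where each key
    # group starts and ends, e.g.:
    #
    #     >>> c = Column([10, 20, 30, 40], name='a')
    #     >>> cg = c.group_by(np.array([1, 2, 1, 2]))
    #     >>> cg.groups.indices   # group boundary indices into the sorted copy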
def _copy_groups(self, out):
"""
Copy current groups into a copy of self ``out``
"""
if self.parent_table:
if hasattr(self.parent_table, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self.parent_table._groups._indices)
elif hasattr(self, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self._groups._indices)
# Strip off the BaseColumn-ness for repr and str so that
# MaskedColumn.data __repr__ does not include masked_BaseColumn(data =
# [1 2], ...).
def __repr__(self):
return np.asarray(self).__repr__()
@property
def quantity(self):
"""
A view of this table column as a `~astropy.units.Quantity` object with
units given by the Column's `unit` parameter.
"""
# the Quantity initializer is used here because it correctly fails
# if the column's values are non-numeric (like strings), while .view
# will happily return a quantity with gibberish for numerical values
return Quantity(self, copy=False, dtype=self.dtype, order='A')
def to(self, unit, equivalencies=[], **kwargs):
"""
Converts this table column to a `~astropy.units.Quantity` object with
the requested units.
Parameters
----------
unit : `~astropy.units.Unit` or str
The unit to convert to (i.e., a valid argument to the
:meth:`astropy.units.Quantity.to` method).
equivalencies : list of equivalence pairs, optional
Equivalencies to use for this conversion. See
:meth:`astropy.units.Quantity.to` for more details.
Returns
-------
quantity : `~astropy.units.Quantity`
A quantity object with the contents of this column in the units
``unit``.
"""
return self.quantity.to(unit, equivalencies)
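    # Illustrative sketch (not part of the original source): ``to`` leaves the
    # column itself untouched and hands back a Quantity in the requested unit:
    #
    #     >>> c = Column([1.0, 2.0], name='d', unit='km')
    #     >>> q = c.to('m')   # Quantity [1000., 2000.] m; c still holds km values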
def _copy_attrs(self, obj):
"""
Copy key column attributes from ``obj`` to self
"""
for attr in ('name', 'unit', 'format', 'description'):
val = getattr(obj, attr, None)
setattr(self, attr, val)
self.meta = deepcopy(getattr(obj, 'meta', {}))
class Column(BaseColumn):
"""Define a data column for use in a Table object.
Parameters
----------
data : list, ndarray or None
Column data values
name : str
Column name and key for reference within Table
dtype : numpy.dtype compatible value
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str or None or function or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A Column can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = Column(data=[1, 2], name='name') # shape=(2,)
col = Column(data=[[1, 2], [3, 4]], name='name') # shape=(2, 2)
col = Column(data=[1, 2], name='name', dtype=float)
col = Column(data=np.array([1, 2]), name='name')
col = Column(data=['hello', 'world'], name='name')
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
      If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = Column(name='name', length=5)
col = Column(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
"""
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if isinstance(data, MaskedColumn) and np.any(data.mask):
raise TypeError("Cannot convert a MaskedColumn with masked value to a Column")
self = super(Column, cls).__new__(cls, data=data, name=name, dtype=dtype,
shape=shape, length=length, description=description,
unit=unit, format=format, meta=meta,
copy=copy, copy_indices=copy_indices)
return self
def __setattr__(self, item, value):
if not isinstance(self, MaskedColumn) and item == "mask":
raise AttributeError("cannot set mask value to a column in non-masked Table")
super(Column, self).__setattr__(item, value)
if item == 'unit' and issubclass(self.dtype.type, np.number):
try:
converted = self.parent_table._convert_col_for_table(self)
except AttributeError: # Either no parent table or parent table is None
pass
else:
if converted is not self:
self.parent_table.replace_column(self.name, converted)
def _base_repr_(self, html=False):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return repr(self.item())
descr_vals = [self.__class__.__name__]
unit = None if self.unit is None else str(self.unit)
shape = None if self.ndim <= 1 else self.shape[1:]
for attr, val in (('name', self.name),
('dtype', dtype_info_name(self.dtype)),
('shape', shape),
('unit', unit),
('format', self.format),
('description', self.description),
('length', len(self))):
if val is not None:
descr_vals.append('{0}={1}'.format(attr, repr(val)))
descr = '<' + ' '.join(descr_vals) + '>\n'
if html:
from ..utils.xml.writer import xml_escape
descr = xml_escape(descr)
data_lines, outs = self._formatter._pformat_col(
self, show_name=False, show_unit=False, show_length=False, html=html)
out = descr + '\n'.join(data_lines)
if six.PY2 and isinstance(out, six.text_type):
out = out.encode('utf-8')
return out
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __unicode__(self):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return str(self.item())
lines, outs = self._formatter._pformat_col(self)
return '\n'.join(lines)
if not six.PY2:
__str__ = __unicode__
def __bytes__(self):
return six.text_type(self).encode('utf-8')
if six.PY2:
__str__ = __bytes__
def _check_string_truncate(self, value):
value = np.asanyarray(value, dtype=self.dtype.type)
if value.dtype.itemsize > self.dtype.itemsize:
warnings.warn('truncated right side string(s) longer than {} '
'character(s) during assignment'
.format(self.dtype.str[2:]),
StringTruncateWarning,
stacklevel=3)
def __setitem__(self, index, value):
# Issue warning for string assignment that truncates ``value``
if issubclass(self.dtype.type, np.character):
self._check_string_truncate(value)
# update indices
self.info.adjust_indices(index, value, len(self))
# Set items using a view of the underlying data, as it gives an
# order-of-magnitude speed-up. [#2994]
self.data[index] = value
if six.PY2:
# avoid falling through to ndarray.__setslice__, instead using
# self.__setitem__, which is much faster (see above). [#3020]
def __setslice__(self, start, stop, value):
self.__setitem__(slice(start, stop), value)
def insert(self, obj, values):
"""
Insert values before the given indices in the column and return
a new `~astropy.table.Column` object.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array_like
Value(s) to insert. If the type of ``values`` is different
            from that of the column, ``values`` is converted to the matching type.
            ``values`` should be shaped so that it can be broadcast appropriately.
Returns
-------
out : `~astropy.table.Column`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new column is returned.
"""
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
data = np.insert(self, obj, None, axis=0)
data[obj] = values
else:
# Explicitly convert to dtype of this column. Needed because numpy 1.7
            # enforces safe casting by default, so the values must first be
            # converted explicitly. This isn't the case for 1.6 or 1.8+.
values = np.asarray(values, dtype=self.dtype)
data = np.insert(self, obj, values, axis=0)
out = data.view(self.__class__)
out.__array_finalize__(self)
return out
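    # Illustrative sketch (not part of the original source): ``insert`` is a
    # copying operation modelled on ``np.insert``; the original column is not
    # modified, e.g.:
    #
    #     >>> c = Column([1, 2, 4], name='a')
    #     >>> c2 = c.insert(2, 3)   # c2 holds [1, 2, 3, 4]; c still has 3 rows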
# We do this to make the methods show up in the API docs
name = BaseColumn.name
unit = BaseColumn.unit
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
quantity = BaseColumn.quantity
to = BaseColumn.to
class MaskedColumn(Column, _MaskedColumnGetitemShim, ma.MaskedArray):
"""Define a masked data column for use in a Table object.
Parameters
----------
data : list, ndarray or None
Column data values
name : str
Column name and key for reference within Table
mask : list, ndarray or None
Boolean mask for which True indicates missing or invalid data
fill_value : float, int, str or None
Value used when filling masked column elements
dtype : numpy.dtype compatible value
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str or None or function or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A MaskedColumn is similar to a Column except that it includes ``mask`` and
``fill_value`` attributes. It can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = MaskedColumn(data=[1, 2], name='name')
col = MaskedColumn(data=[1, 2], name='name', mask=[True, False])
col = MaskedColumn(data=[1, 2], name='name', dtype=float, fill_value=99)
The ``mask`` argument will be cast as a boolean array and specifies
which elements are considered to be missing or invalid.
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
      If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``. When ``data`` is provided then the ``shape``
and ``length`` arguments are ignored.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = MaskedColumn(name='name', length=5)
col = MaskedColumn(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
"""
def __new__(cls, data=None, name=None, mask=None, fill_value=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if mask is None and hasattr(data, 'mask'):
mask = data.mask
else:
mask = deepcopy(mask)
# Create self using MaskedArray as a wrapper class, following the example of
# class MSubArray in
# https://github.com/numpy/numpy/blob/maintenance/1.8.x/numpy/ma/tests/test_subclassing.py
# This pattern makes it so that __array_finalize__ is called as expected (e.g. #1471 and
# https://github.com/astropy/astropy/commit/ff6039e8)
# First just pass through all args and kwargs to BaseColumn, then wrap that object
# with MaskedArray.
self_data = BaseColumn(data, dtype=dtype, shape=shape, length=length, name=name,
unit=unit, format=format, description=description,
meta=meta, copy=copy, copy_indices=copy_indices)
self = ma.MaskedArray.__new__(cls, data=self_data, mask=mask)
# Note: do not set fill_value in the MaskedArray constructor because this does not
# go through the fill_value workarounds (see _fix_fill_value below).
if fill_value is None and hasattr(data, 'fill_value') and data.fill_value is not None:
# Coerce the fill_value to the correct type since `data` may be a
# different dtype than self.
fill_value = self.dtype.type(data.fill_value)
self.fill_value = fill_value
self.parent_table = None
# needs to be done here since self doesn't come from BaseColumn.__new__
for index in self.indices:
index.replace_col(self_data, self)
return self
def _fix_fill_value(self, val):
"""Fix a fill value (if needed) to work around a bug with setting the fill
value of a string array in MaskedArray with Python 3.x. See
https://github.com/numpy/numpy/pull/2733. This mimics the check in
numpy.ma.core._check_fill_value() (version < 1.8) which incorrectly sets
fill_value to a default if self.dtype.char is 'U' (which is the case for Python
3). Here we change the string to a byte string so that in Python 3 the
isinstance(val, basestring) part fails.
"""
if (NUMPY_LT_1_8 and isinstance(val, six.string_types) and
(self.dtype.char not in 'SV')):
val = val.encode()
return val
@property
def fill_value(self):
return self.get_fill_value() # defer to native ma.MaskedArray method
@fill_value.setter
def fill_value(self, val):
"""Set fill value both in the masked column view and in the parent table
if it exists. Setting one or the other alone doesn't work."""
val = self._fix_fill_value(val)
# Yet another ma bug workaround: If the value of fill_value for a string array is
# requested but not yet set then it gets created as 'N/A'. From this point onward
# any new fill_values are truncated to 3 characters. Note that this does not
# occur if the masked array is a structured array (as in the previous block that
# deals with the parent table).
#
# >>> x = ma.array(['xxxx'])
# >>> x.fill_value # fill_value now gets represented as an 'S3' array
# 'N/A'
# >>> x.fill_value='yyyy'
# >>> x.fill_value
# 'yyy'
#
# To handle this we are forced to reset a private variable first:
self._fill_value = None
self.set_fill_value(val) # defer to native ma.MaskedArray method
@property
def data(self):
out = self.view(ma.MaskedArray)
# The following is necessary because of a bug in Numpy, which was
# fixed in numpy/numpy#2703. The fix should be included in Numpy 1.8.0.
out.fill_value = self.fill_value
return out
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled with a given value.
Parameters
----------
        fill_value : scalar, optional
The value to use for invalid entries (`None` by default). If
`None`, the ``fill_value`` attribute of the array is used
instead.
Returns
-------
filled_column : Column
A copy of ``self`` with masked entries replaced by `fill_value`
(be it the function argument or the attribute of ``self``).
"""
if fill_value is None:
fill_value = self.fill_value
fill_value = self._fix_fill_value(fill_value)
data = super(MaskedColumn, self).filled(fill_value)
# Use parent table definition of Column if available
column_cls = self.parent_table.Column if (self.parent_table is not None) else Column
out = column_cls(name=self.name, data=data, unit=self.unit,
format=self.format, description=self.description,
meta=deepcopy(self.meta))
return out
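    # Illustrative sketch (not part of the original source): ``filled`` hands
    # back a plain Column with masked entries replaced, leaving the
    # MaskedColumn itself untouched, e.g.:
    #
    #     >>> mc = MaskedColumn([1, 2, 3], name='a', mask=[False, True, False])
    #     >>> mc.filled(-99)   # Column with values [1, -99, 3]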
def insert(self, obj, values, mask=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.table.MaskedColumn` object.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array_like
Value(s) to insert. If the type of ``values`` is different
            from that of the column, ``values`` is converted to the matching type.
            ``values`` should be shaped so that it can be broadcast appropriately.
mask : boolean array_like
Mask value(s) to insert. If not supplied then False is used.
Returns
-------
out : `~astropy.table.MaskedColumn`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new masked column is returned.
"""
self_ma = self.data # self viewed as MaskedArray
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
new_data = np.insert(self_ma.data, obj, None, axis=0)
new_data[obj] = values
else:
# Explicitly convert to dtype of this column. Needed because numpy 1.7
            # enforces safe casting by default, so the values must first be
            # converted explicitly. This isn't the case for 1.6 or 1.8+.
values = np.asarray(values, dtype=self.dtype)
new_data = np.insert(self_ma.data, obj, values, axis=0)
if mask is None:
if self.dtype.kind == 'O':
mask = False
else:
mask = np.zeros(values.shape, dtype=np.bool)
new_mask = np.insert(self_ma.mask, obj, mask, axis=0)
new_ma = np.ma.array(new_data, mask=new_mask, copy=False)
out = new_ma.view(self.__class__)
out.parent_table = None
out.indices = []
out._copy_attrs(self)
return out
def _copy_attrs_slice(self, out):
# Fixes issue #3023: when calling getitem with a MaskedArray subclass
# the original object attributes are not copied.
if out.__class__ is self.__class__:
out.parent_table = None
# we need this because __getitem__ does a shallow copy of indices
if out.indices is self.indices:
out.indices = []
out._copy_attrs(self)
return out
def __setitem__(self, index, value):
# update indices
self.info.adjust_indices(index, value, len(self))
ma.MaskedArray.__setitem__(self, index, value)
# We do this to make the methods show up in the API docs
name = BaseColumn.name
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
|
tbabej/astropy
|
astropy/table/column.py
|
Python
|
bsd-3-clause
| 41,990
|
#!/usr/bin/env python
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
# See the test.osl source for explanation of this test
command = testshade("-g 16 16 test -od uint8 -o Cout out.tif")
outputs = [ "out.tif" ]
|
imageworks/OpenShadingLanguage
|
testsuite/bug-outputinit/run.py
|
Python
|
bsd-3-clause
| 337
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
__doctest_skip__ = ['quantity_support']
def quantity_support(format='latex_inline'):
"""
Enable support for plotting `astropy.units.Quantity` instances in
matplotlib.
May be (optionally) used with a ``with`` statement.
>>> import matplotlib.pyplot as plt
>>> from astropy import units as u
>>> from astropy import visualization
>>> with visualization.quantity_support():
... plt.figure()
... plt.plot([1, 2, 3] * u.m)
[...]
... plt.plot([101, 125, 150] * u.cm)
[...]
... plt.draw()
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to ``latex_inline``.
"""
from .. import units as u
from matplotlib import units
from matplotlib import ticker
def rad_fn(x, pos=None):
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return '0'
elif n == 1:
return 'π/2'
elif n == 2:
return 'π'
elif n % 2 == 0:
            return '{0}π'.format(n // 2)
else:
return '{0}π/2'.format(n)
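    # Worked sketch (not part of the original source): rad_fn labels a tick
    # value x (in radians) by counting half-pi steps, with the +0.25 rounding
    # to the nearest step:
    #
    #     x = 0         -> n = 0 -> '0'
    #     x = np.pi/2   -> n = 1 -> 'π/2'
    #     x = np.pi     -> n = 2 -> 'π'
    #     x = 3*np.pi/2 -> n = 3 -> '3π/2'
    #     x = 2*np.pi   -> n = 4 -> '2π'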
class MplQuantityConverter(units.ConversionInterface):
def __init__(self):
if u.Quantity not in units.registry:
units.registry[u.Quantity] = self
self._remove = True
else:
self._remove = False
@staticmethod
def axisinfo(unit, axis):
if unit == u.radian:
return units.AxisInfo(
majloc=ticker.MultipleLocator(base=np.pi/2),
majfmt=ticker.FuncFormatter(rad_fn),
label=unit.to_string(),
)
elif unit == u.degree:
return units.AxisInfo(
majloc=ticker.AutoLocator(),
majfmt=ticker.FormatStrFormatter('%i°'),
label=unit.to_string(),
)
elif unit is not None:
return units.AxisInfo(label=unit.to_string(format))
return None
@staticmethod
def convert(val, unit, axis):
if isinstance(val, u.Quantity):
return val.to_value(unit)
else:
return val
@staticmethod
def default_units(x, axis):
if hasattr(x, 'unit'):
return x.unit
return None
def __enter__(self):
return self
def __exit__(self, type, value, tb):
if self._remove:
del units.registry[u.Quantity]
return MplQuantityConverter()
|
kelle/astropy
|
astropy/visualization/units.py
|
Python
|
bsd-3-clause
| 2,941
|
#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup(
packages=['jsk_apc2016_common'],
package_dir={'': 'python'},
)
setup(**d)
|
start-jsk/jsk_apc
|
jsk_apc2016_common/setup.py
|
Python
|
bsd-3-clause
| 232
|
import datetime
import logging
from django.conf import settings
from django.core.exceptions import NON_FIELD_ERRORS
from google.appengine.api.datastore import Key, Delete, MAX_ALLOWABLE_QUERIES
from google.appengine.datastore.datastore_rpc import TransactionOptions
from google.appengine.ext import db
from .unique_utils import unique_identifiers_from_entity
from .utils import key_exists
from djangae.db.backends.appengine.dbapi import IntegrityError, NotSupportedError
DJANGAE_LOG = logging.getLogger("djangae")
def has_active_unique_constraints(model_or_instance):
"""
Returns true if the model/instance has unique fields or unique_together fields and unique
constraint checking is enabled on the model
"""
django_opts = getattr(model_or_instance, "_meta", None)
# If there are no unique fields on the model, return false
if not django_opts.unique_together and not any(x.unique for x in django_opts.fields):
return False
opts = getattr(model_or_instance, "Djangae", None)
if opts:
if hasattr(opts, "disable_constraint_checks"):
if opts.disable_constraint_checks:
return False
else:
return True
return not getattr(settings, "DJANGAE_DISABLE_CONSTRAINT_CHECKS", False)
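# Illustrative sketch (not part of the original source): constraint checking is
# skipped per-model via an inner ``Djangae`` class, mirroring the attributes
# read above (the model and field names here are hypothetical):
#
#     class Profile(models.Model):
#         email = models.EmailField(unique=True)
#
#         class Djangae:
#             disable_constraint_checks = True  # has_active_unique_constraints -> False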
class KeyProperty(db.Property):
"""A property that stores a datastore.Key reference to another object.
Think of this as a Django GenericForeignKey which returns only the PK value, not the whole
object, or a db.ReferenceProperty which can point to any model kind, and only returns the Key.
"""
def validate(self, value):
if value is None or isinstance(value, Key):
return value
raise ValueError("KeyProperty only accepts datastore.Key or None")
class UniqueMarker(db.Model):
instance = KeyProperty()
created = db.DateTimeProperty(required=True, auto_now_add=True)
@staticmethod
def kind():
return "_djangae_unique_marker"
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def acquire_identifiers(identifiers, entity_key):
return _acquire_identifiers(identifiers, entity_key)
def _acquire_identifiers(identifiers, entity_key):
    # This must always be in a cross-group transaction, because even if there's only 1 identifier,
# in the case where that identifier already exists, we then check if its `instance` exists
assert entity_key
namespace = entity_key.namespace() or None
identifier_keys = [
Key.from_path(UniqueMarker.kind(), identifier, namespace=namespace) for identifier in identifiers
]
existing_markers = UniqueMarker.get(identifier_keys)
markers_to_create = []
markers = []
for identifier_key, existing_marker in zip(identifier_keys, existing_markers):
        # Backwards compatibility: we used to create the markers first in an independent transaction
# and then create the entity and update the `instance` on the markers. This meant that it
# was possible that the independent marker creation transaction finished first and the outer
# transaction failed, causing stale markers to be left behind. We no longer do it this way
# but we still want to ignore any old stale markers, hence if instance is None we overwrite.
now = datetime.datetime.utcnow()
if not existing_marker or existing_marker.instance is None:
markers_to_create.append(UniqueMarker(
key=identifier_key,
instance=entity_key,
created=now
))
elif existing_marker.instance != entity_key and key_exists(existing_marker.instance):
fields_and_values = identifier_key.name().split("|")
table_name = fields_and_values[0]
fields_and_values = fields_and_values[1:]
fields = [x.split(":")[0] for x in fields_and_values]
raise IntegrityError("Unique constraint violation for kind {} on fields: {}".format(table_name, ", ".join(fields)))
elif existing_marker.instance != entity_key:
markers_to_create.append(UniqueMarker(
key=identifier_key,
instance=entity_key,
created=now
))
else:
# The marker is ours anyway
markers.append(existing_marker)
db.put(markers_to_create)
return markers + markers_to_create
def get_markers_for_update(model, old_entity, new_entity):
"""
Given an old entity state, and the new state, updates the identifiers
appropriately. Should be called before saving the new_state
"""
old_ids = set(unique_identifiers_from_entity(model, old_entity, ignore_pk=True))
new_ids = set(unique_identifiers_from_entity(model, new_entity, ignore_pk=True))
to_release = old_ids - new_ids
to_acquire = new_ids - old_ids
return to_acquire, to_release
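# Worked sketch (not part of the original source): the update reduces to two
# set differences over identifier strings (the identifier values shown here
# are hypothetical):
#
#     old_ids = {'profile|email:a@example.com'}
#     new_ids = {'profile|email:b@example.com'}
#     to_release = old_ids - new_ids   # markers to delete
#     to_acquire = new_ids - old_ids   # markers to create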
def update_instance_on_markers(entity, markers):
# TODO: fix me!
def update(marker, instance):
marker = UniqueMarker.get(marker.key())
if not marker:
return
marker.instance = instance
marker.put()
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def update_all():
instance = entity.key()
for marker in markers:
update(marker, instance)
update_all()
def acquire(model, entity):
"""
Given a model and entity, this tries to acquire unique marker locks for the instance. If
the locks already exist then an IntegrityError will be thrown.
"""
identifiers = unique_identifiers_from_entity(model, entity, ignore_pk=True)
return acquire_identifiers(identifiers, entity.key())
def release_markers(markers):
""" Delete the given UniqueMarker objects. """
# Note that these should all be from the same Django model instance, and therefore there should
    # be a maximum of 25 of them (everything blows up past that; an App Engine cross-group transaction limit)
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=len(markers) > 1)
def txn():
Delete([marker.key() for marker in markers])
txn()
def release_identifiers(identifiers, namespace):
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=len(identifiers) > 1)
def txn():
_release_identifiers(identifiers, namespace)
txn()
def _release_identifiers(identifiers, namespace):
keys = [Key.from_path(UniqueMarker.kind(), x, namespace=namespace) for x in identifiers]
Delete(keys)
def release(model, entity):
""" Delete the UniqueMarker objects for the given entity. """
if not has_active_unique_constraints(model):
return
identifiers = unique_identifiers_from_entity(model, entity, ignore_pk=True)
# Key.from_path expects None for an empty namespace, but Key.namespace() returns ''
namespace = entity.key().namespace() or None
release_identifiers(identifiers, namespace=namespace)
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def update_identifiers(to_acquire, to_release, key):
""" A combination of acquire_identifiers and release_identifiers in a combined transaction. """
_acquire_identifiers(to_acquire, key)
_release_identifiers(to_release, key.namespace() or None)
class UniquenessMixin(object):
""" Mixin overriding the methods checking value uniqueness.
For models defining unique constraints this mixin should be inherited from.
When iterable (list or set) fields are marked as unique it must be used.
This is a copy of Django's implementation, save for the part marked by the comment.
"""
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
continue
if f.primary_key and not self._state.adding:
continue
##########################################################################
# This is a modification to Django's native implementation of this method;
# we conditionally build a __in lookup if the value is an iterable.
lookup = str(field_name)
if isinstance(lookup_value, (list, set, tuple)):
lookup = "%s__overlap" % lookup
lookup_kwargs[lookup] = lookup_value
##########################################################################
# / end of changes
if len(unique_check) != len(lookup_kwargs):
continue
#######################################################
# Deal with long __in lookups by doing multiple queries in that case
# This is a bit hacky, but we really have no choice due to App Engine's
# 30 multi-query limit. This also means we can't support multiple list fields in
# a unique combination
#######################################################
if len([x for x in lookup_kwargs if x.endswith("__in")]) > 1:
raise NotSupportedError("You cannot currently have two list fields in a unique combination")
# Split IN queries into multiple lookups if they are too long
lookups = []
for k, v in lookup_kwargs.iteritems():
if (k.endswith("__in") or k.endswith("__overlap")) and len(v) > MAX_ALLOWABLE_QUERIES:
v = list(v)
while v:
new_lookup = lookup_kwargs.copy()
new_lookup[k] = v[:30]
v = v[30:]
lookups.append(new_lookup)
break
else:
# Otherwise just use the one lookup
lookups = [lookup_kwargs]
for lookup_kwargs in lookups:
qs = model_class._default_manager.filter(**lookup_kwargs).values_list("pk", flat=True)
model_class_pk = self._get_pk_val(model_class._meta)
result = list(qs)
if not self._state.adding and model_class_pk is not None:
                    # If we are saving an instance, we ignore its PK in the result
try:
result.remove(model_class_pk)
except ValueError:
pass
if result:
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
break
return errors
|
kirberich/djangae
|
djangae/db/constraints.py
|
Python
|
bsd-3-clause
| 11,037
|
import io
import unittest
from payment_terminal.drivers.bbs.connection import read_frame, write_frame
class TestBBSFrames(unittest.TestCase):
def test_read_one(self):
port = io.BytesIO(b'\x00\x0512345')
self.assertEqual(read_frame(port), b'12345')
def test_read_two(self):
port = io.BytesIO(b'\x00\x0512345\x00\x06123456')
self.assertEqual(read_frame(port), b'12345')
self.assertEqual(read_frame(port), b'123456')
def test_read_end_of_file(self):
port = io.BytesIO(b'')
# TODO more specific
self.assertRaises(Exception, read_frame, port)
def test_read_truncated_header(self):
port = io.BytesIO(b'a')
# TODO more specific
self.assertRaises(Exception, read_frame, port)
def test_read_truncated_body(self):
port = io.BytesIO(b'\x00\x09trunca')
# TODO more specific
self.assertRaises(Exception, read_frame, port)
def test_write_one(self):
port = io.BytesIO()
write_frame(port, b'hello world')
self.assertEqual(port.getvalue(), b'\x00\x0bhello world')
def test_write_two(self):
port = io.BytesIO()
write_frame(port, b'12345')
write_frame(port, b'123456')
self.assertEqual(port.getvalue(), b'\x00\x0512345\x00\x06123456')
def test_write_too_much(self):
port = io.BytesIO()
# TODO more specific
self.assertRaises(Exception, write_frame, port, b'x' * 2**16)
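# Illustrative sketch (not part of the original source): the byte strings used
# above assume a framing of a two-byte big-endian length header followed by the
# payload, so a frame could equally be built with the standard library:
#
#     import struct
#     frame = struct.pack('>H', len(b'12345')) + b'12345'   # b'\x00\x0512345'
#
# test_write_too_much passes 2**16 bytes, which no longer fits in that header.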
|
bwhmather/python-payment-terminal
|
payment_terminal/drivers/bbs/tests/test_frames.py
|
Python
|
bsd-3-clause
| 1,484
|
from __future__ import absolute_import, unicode_literals
import re
from django.conf import settings
from django.contrib.auth.models import User
from django_comments import signals
from django_comments.models import Comment
from . import CommentTestCase
from ..models import Article, Book
post_redirect_re = re.compile(r'^http://testserver/posted/\?c=(?P<pk>\d+$)')
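# Illustrative sketch (not part of the original source): the regex above pulls
# the new comment's pk out of the default post-redirect URL, e.g.:
#
#     >>> post_redirect_re.match('http://testserver/posted/?c=123').group('pk')
#     '123'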
class CommentViewTests(CommentTestCase):
def testPostCommentHTTPMethods(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
response = self.client.get("/post/", data)
self.assertEqual(response.status_code, 405)
self.assertEqual(response["Allow"], "POST")
def testPostCommentMissingCtype(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
del data["content_type"]
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testPostCommentBadCtype(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["content_type"] = "Nobody expects the Spanish Inquisition!"
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testPostCommentMissingObjectPK(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
del data["object_pk"]
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testPostCommentBadObjectPK(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["object_pk"] = "14"
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testPostInvalidIntegerPK(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["comment"] = "This is another comment"
data["object_pk"] = '\ufffd'
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testPostInvalidDecimalPK(self):
b = Book.objects.get(pk='12.34')
data = self.getValidData(b)
data["comment"] = "This is another comment"
data["object_pk"] = 'cookies'
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testCommentPreview(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["preview"] = "Preview"
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "comments/preview.html")
def testHashTampering(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["security_hash"] = "Nobody expects the Spanish Inquisition!"
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
def testDebugCommentErrors(self):
"""The debug error template should be shown only if DEBUG is True"""
olddebug = settings.DEBUG
settings.DEBUG = True
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["security_hash"] = "Nobody expects the Spanish Inquisition!"
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
self.assertTemplateUsed(response, "comments/400-debug.html")
settings.DEBUG = False
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
self.assertTemplateNotUsed(response, "comments/400-debug.html")
settings.DEBUG = olddebug
def testCreateValidComment(self):
address = "1.2.3.4"
a = Article.objects.get(pk=1)
data = self.getValidData(a)
self.response = self.client.post("/post/", data, REMOTE_ADDR=address)
self.assertEqual(self.response.status_code, 302)
self.assertEqual(Comment.objects.count(), 1)
c = Comment.objects.all()[0]
self.assertEqual(c.ip_address, address)
self.assertEqual(c.comment, "This is my comment")
def testCreateValidCommentIPv6(self):
"""
Test creating a valid comment with a long IPv6 address.
Note that this test should fail when Comment.ip_address is an IPAddress instead of a GenericIPAddress,
but does not do so on SQLite or PostgreSQL, because they use the TEXT and INET types, which already
allow storing an IPv6 address internally.
"""
address = "2a02::223:6cff:fe8a:2e8a"
a = Article.objects.get(pk=1)
data = self.getValidData(a)
self.response = self.client.post("/post/", data, REMOTE_ADDR=address)
self.assertEqual(self.response.status_code, 302)
self.assertEqual(Comment.objects.count(), 1)
c = Comment.objects.all()[0]
self.assertEqual(c.ip_address, address)
self.assertEqual(c.comment, "This is my comment")
def testCreateValidCommentIPv6Unpack(self):
address = "::ffff:18.52.18.52"
a = Article.objects.get(pk=1)
data = self.getValidData(a)
self.response = self.client.post("/post/", data, REMOTE_ADDR=address)
self.assertEqual(self.response.status_code, 302)
self.assertEqual(Comment.objects.count(), 1)
c = Comment.objects.all()[0]
# We trim the '::ffff:' bit off because it is an IPv4 addr
self.assertEqual(c.ip_address, address[7:])
self.assertEqual(c.comment, "This is my comment")
def testPostAsAuthenticatedUser(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data['name'] = data['email'] = ''
self.client.login(username="normaluser", password="normaluser")
self.response = self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
self.assertEqual(self.response.status_code, 302)
self.assertEqual(Comment.objects.count(), 1)
c = Comment.objects.all()[0]
self.assertEqual(c.ip_address, "1.2.3.4")
u = User.objects.get(username='normaluser')
self.assertEqual(c.user, u)
self.assertEqual(c.user_name, u.get_full_name())
self.assertEqual(c.user_email, u.email)
def testPostAsAuthenticatedUserWithoutFullname(self):
"""
Check that the user's name in the comment is populated for
authenticated users without first_name and last_name.
"""
user = User.objects.create_user(username='jane_other',
email='jane@example.com', password='jane_other')
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data['name'] = data['email'] = ''
self.client.login(username="jane_other", password="jane_other")
self.response = self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
c = Comment.objects.get(user=user)
self.assertEqual(c.ip_address, "1.2.3.4")
self.assertEqual(c.user_name, 'jane_other')
user.delete()
def testPreventDuplicateComments(self):
"""Prevent posting the exact same comment twice"""
a = Article.objects.get(pk=1)
data = self.getValidData(a)
self.client.post("/post/", data)
self.client.post("/post/", data)
self.assertEqual(Comment.objects.count(), 1)
# This should not trigger the duplicate prevention
self.client.post("/post/", dict(data, comment="My second comment."))
self.assertEqual(Comment.objects.count(), 2)
def testCommentSignals(self):
"""Test signals emitted by the comment posting view"""
# callback
def receive(sender, **kwargs):
self.assertEqual(kwargs['comment'].comment, "This is my comment")
self.assertTrue('request' in kwargs)
received_signals.append(kwargs.get('signal'))
# Connect signals and keep track of handled ones
received_signals = []
expected_signals = [
signals.comment_will_be_posted, signals.comment_was_posted
]
for signal in expected_signals:
signal.connect(receive)
# Post a comment and check the signals
self.testCreateValidComment()
self.assertEqual(received_signals, expected_signals)
for signal in expected_signals:
signal.disconnect(receive)
def testWillBePostedSignal(self):
"""
Test that the comment_will_be_posted signal can prevent the comment from
actually getting saved
"""
def receive(sender, **kwargs):
return False
signals.comment_will_be_posted.connect(receive, dispatch_uid="comment-test")
a = Article.objects.get(pk=1)
data = self.getValidData(a)
response = self.client.post("/post/", data)
self.assertEqual(response.status_code, 400)
self.assertEqual(Comment.objects.count(), 0)
signals.comment_will_be_posted.disconnect(dispatch_uid="comment-test")
def testWillBePostedSignalModifyComment(self):
"""
Test that the comment_will_be_posted signal can modify a comment before
it gets posted
"""
def receive(sender, **kwargs):
# a bad but effective spam filter :)...
kwargs['comment'].is_public = False
signals.comment_will_be_posted.connect(receive)
self.testCreateValidComment()
c = Comment.objects.all()[0]
self.assertFalse(c.is_public)
def testCommentNext(self):
"""Test the different "next" actions the comment view can take"""
a = Article.objects.get(pk=1)
data = self.getValidData(a)
response = self.client.post("/post/", data)
location = response["Location"]
match = post_redirect_re.match(location)
self.assertIsNotNone(match, "Unexpected redirect location: %s" % location)
data["next"] = "/somewhere/else/"
data["comment"] = "This is another comment"
response = self.client.post("/post/", data)
location = response["Location"]
match = re.search(r"^http://testserver/somewhere/else/\?c=\d+$", location)
self.assertIsNotNone(match, "Unexpected redirect location: %s" % location)
data["next"] = "http://badserver/somewhere/else/"
data["comment"] = "This is another comment with an unsafe next url"
response = self.client.post("/post/", data)
location = response["Location"]
match = post_redirect_re.match(location)
self.assertIsNotNone(match, "Unsafe redirection to: %s" % location)
def testCommentDoneView(self):
a = Article.objects.get(pk=1)
data = self.getValidData(a)
response = self.client.post("/post/", data)
location = response["Location"]
match = post_redirect_re.match(location)
self.assertIsNotNone(match, "Unexpected redirect location: %s" % location)
pk = int(match.group('pk'))
response = self.client.get(location)
self.assertTemplateUsed(response, "comments/posted.html")
self.assertEqual(response.context[0]["comment"], Comment.objects.get(pk=pk))
def testCommentNextWithQueryString(self):
"""
The `next` key needs to handle already having a query string (#10585)
"""
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["next"] = "/somewhere/else/?foo=bar"
data["comment"] = "This is another comment"
response = self.client.post("/post/", data)
location = response["Location"]
match = re.search(r"^http://testserver/somewhere/else/\?foo=bar&c=\d+$", location)
self.assertIsNotNone(match, "Unexpected redirect location: %s" % location)
def testCommentPostRedirectWithInvalidIntegerPK(self):
"""
Tests that attempting to retrieve the location specified in the
post redirect, after adding some invalid data to the expected
querystring it ends with, doesn't cause a server error.
"""
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["comment"] = "This is another comment"
response = self.client.post("/post/", data)
location = response["Location"]
broken_location = location + "\ufffd"
response = self.client.get(broken_location)
self.assertEqual(response.status_code, 200)
def testCommentNextWithQueryStringAndAnchor(self):
"""
The `next` key needs to handle already having an anchor. Refs #13411.
"""
# With a query string also.
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["next"] = "/somewhere/else/?foo=bar#baz"
data["comment"] = "This is another comment"
response = self.client.post("/post/", data)
location = response["Location"]
match = re.search(r"^http://testserver/somewhere/else/\?foo=bar&c=\d+#baz$", location)
self.assertIsNotNone(match, "Unexpected redirect location: %s" % location)
# Without a query string
a = Article.objects.get(pk=1)
data = self.getValidData(a)
data["next"] = "/somewhere/else/#baz"
data["comment"] = "This is another comment"
response = self.client.post("/post/", data)
location = response["Location"]
match = re.search(r"^http://testserver/somewhere/else/\?c=\d+#baz$", location)
self.assertIsNotNone(match, "Unexpected redirect location: %s" % location)
|
giginet/django-contrib-comments
|
tests/testapp/tests/test_comment_views.py
|
Python
|
bsd-3-clause
| 13,575
|
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
#if __name__ == "__main__":
# print "Hello World"
from WorkingWithFiles import Excel2PdfConversion
import jpype
import os.path
asposeapispath = os.path.join(os.path.abspath("../../../"), "lib/")
dataDir = os.path.join(os.path.abspath("./"), "data/")
print "You need to put your Aspose.Cells for Java APIs .jars in this folder:\n"+asposeapispath
#print dataDir
jpype.startJVM(jpype.getDefaultJVMPath(), "-Djava.ext.dirs=%s" % asposeapispath)
hw = Excel2PdfConversion(dataDir)
hw.main()
|
asposecells/Aspose_Cells_Java
|
Plugins/Aspose-Cells-Java-for-Python/tests/WorkingWithFiles/Excel2PdfConversion/Excel2PdfConversion.py
|
Python
|
mit
| 668
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
Print a list of pages, as defined by page generator parameters.
Optionally, it also prints page content to STDOUT or save it to a file
in the current directory.
These parameters are supported to specify which pages titles to print:
-format Defines the output format.
Can be a custom string according to python string.format() notation
or can be selected by a number from following list
(1 is default format):
1 - u'{num:4d} {page.title}'
--> 10 PageTitle
2 - u'{num:4d} [[{page.title}]]'
--> 10 [[PageTitle]]
3 - u'{page.title}'
--> PageTitle
4 - u'[[{page.title}]]'
--> [[PageTitle]]
5 - u'{num:4d} \03{{lightred}}{page.loc_title:<40}\03{{default}}'
--> 10 localised_Namespace:PageTitle (colorised in lightred)
6 - u'{num:4d} {page.loc_title:<40} {page.can_title:<40}'
--> 10 localised_Namespace:PageTitle
canonical_Namespace:PageTitle
7 - u'{num:4d} {page.loc_title:<40} {page.trs_title:<40}'
--> 10 localised_Namespace:PageTitle
outputlang_Namespace:PageTitle
(*) requires "outputlang:lang" set.
num is the sequential number of the listed page.
An empty format is equal to -notitle and just shows the total
amount of pages.
-outputlang Language for translation of namespaces.
-notitle Page title is not printed.
-get Page content is printed.
-save Save Page content to a file named as page.title(as_filename=True).
Directory can be set with -save:dir_name
            If no dir is specified, current directory will be used.
-encode File encoding can be specified with '-encode:name' (name must be
a valid python encoding: utf-8, etc.).
If not specified, it defaults to config.textfile_encoding.
-put: Save the list to the defined page of the wiki. By default it does
            not overwrite an existing page.
-overwrite Overwrite the page if it exists. Can only by applied with -put.
-summary: The summary text when the page is written. If it's one word just
containing letters, dashes and underscores it uses that as a
translation key.
Custom format can be applied to the following items extrapolated from a
page object:
site: obtained from page._link._site.
title: obtained from page._link._title.
loc_title: obtained from page._link.canonical_title().
can_title: obtained from page._link.ns_title().
        based on either the canonical namespace name or the namespace name
in the language specified by the -trans param;
a default value '******' will be used if no ns is found.
onsite: obtained from pywikibot.Site(outputlang, self.site.family).
trs_title: obtained from page._link.ns_title(onsite=onsite).
If selected format requires trs_title, outputlang must be set.
¶ms;
"""
#
# (C) Pywikibot team, 2008-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import os
import re
import pywikibot
from pywikibot import config2 as config, i18n
from pywikibot.pagegenerators import GeneratorFactory, parameterHelp
docuReplacements = {'¶ms;': parameterHelp}
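# Illustrative usage sketch, added for clarity and not part of the original
# script: hypothetical pwb.py invocations exercising the options documented in
# the module docstring above; the category name and target page are placeholders.
#   python pwb.py listpages -cat:Example -format:2          # numbered wikilinks
#   python pwb.py listpages -cat:Example -notitle           # only report the count
#   python pwb.py listpages -cat:Example -put:User:Me/List -summary:update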
class Formatter(object):
"""Structure with Page attributes exposed for formatting from cmd line."""
fmt_options = {
'1': u"{num:4d} {page.title}",
'2': u"{num:4d} [[{page.title}]]",
'3': u"{page.title}",
'4': u"[[{page.title}]]",
'5': u"{num:4d} \03{{lightred}}{page.loc_title:<40}\03{{default}}",
'6': u"{num:4d} {page.loc_title:<40} {page.can_title:<40}",
'7': u"{num:4d} {page.loc_title:<40} {page.trs_title:<40}",
}
# Identify which formats need outputlang
fmt_need_lang = [k for k, v in fmt_options.items() if 'trs_title' in v]
def __init__(self, page, outputlang=None, default='******'):
"""
Constructor.
@param page: the page to be formatted.
@type page: Page object.
@param outputlang: language code in which namespace before title should
be translated.
Page ns will be searched in Site(outputlang, page.site.family)
and, if found, its custom name will be used in page.title().
@type outputlang: str or None, if no translation is wanted.
@param default: default string to be used if no corresponding
namespace is found when outputlang is not None.
"""
self.site = page._link.site
self.title = page._link.title
self.loc_title = page._link.canonical_title()
self.can_title = page._link.ns_title()
self.outputlang = outputlang
if outputlang is not None:
# Cache onsite in case of translations.
if not hasattr(self, "onsite"):
self.onsite = pywikibot.Site(outputlang, self.site.family)
try:
self.trs_title = page._link.ns_title(onsite=self.onsite)
# Fallback if no corresponding namespace is found in onsite.
except pywikibot.Error:
self.trs_title = u'%s:%s' % (default, page._link.title)
def output(self, num=None, fmt=1):
"""Output formatted string."""
fmt = self.fmt_options.get(fmt, fmt)
# If selected format requires trs_title, outputlang must be set.
        if ((fmt in self.fmt_need_lang or 'trs_title' in fmt)
                and self.outputlang is None):
raise ValueError(
u"Required format code needs 'outputlang' parameter set.")
if num is None:
return fmt.format(page=self)
else:
return fmt.format(num=num, page=self)
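# Hedged example (not in the original script): for a hypothetical page titled
# 'Example page', Formatter(page).output(num=3, fmt='2') renders the
# '{num:4d} [[{page.title}]]' template as '   3 [[Example page]]'.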
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
gen = None
notitle = False
fmt = '1'
outputlang = None
page_get = False
base_dir = None
encoding = config.textfile_encoding
page_target = None
overwrite = False
summary = 'listpages-save-list'
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
genFactory = GeneratorFactory()
for arg in local_args:
option, sep, value = arg.partition(':')
if option == '-notitle':
notitle = True
elif option == '-format':
fmt = value.replace('\\03{{', '\03{{')
if not fmt.strip():
notitle = True
        elif option == '-outputlang':
outputlang = value
elif option == '-get':
page_get = True
elif option == '-save':
base_dir = value or '.'
elif option == '-encode':
encoding = value
elif option == '-put':
page_target = value
elif option == '-overwrite':
overwrite = True
elif option == '-summary':
summary = value
else:
genFactory.handleArg(arg)
if base_dir:
base_dir = os.path.expanduser(base_dir)
if not os.path.isabs(base_dir):
base_dir = os.path.normpath(os.path.join(os.getcwd(), base_dir))
if not os.path.exists(base_dir):
pywikibot.output(u'Directory "%s" does not exist.' % base_dir)
choice = pywikibot.input_yn(
u'Do you want to create it ("No" to continue without saving)?')
if choice:
os.makedirs(base_dir, mode=0o744)
else:
base_dir = None
elif not os.path.isdir(base_dir):
# base_dir is a file.
pywikibot.warning(u'Not a directory: "%s"\n'
u'Skipping saving ...'
% base_dir)
base_dir = None
if page_target:
site = pywikibot.Site()
page_target = pywikibot.Page(site, page_target)
if not overwrite and page_target.exists():
pywikibot.bot.suggest_help(
additional_text='Page {0} already exists.\n'
'You can use the -overwrite argument to '
'replace the content of this page.'
.format(page_target.title(asLink=True)))
return False
if re.match('^[a-z_-]+$', summary):
summary = i18n.twtranslate(site, summary)
gen = genFactory.getCombinedGenerator()
if gen:
i = 0
output_list = []
for i, page in enumerate(gen, start=1):
if not notitle:
page_fmt = Formatter(page, outputlang)
output_list += [page_fmt.output(num=i, fmt=fmt)]
pywikibot.stdout(output_list[-1])
if page_get:
try:
pywikibot.stdout(page.text)
except pywikibot.Error as err:
pywikibot.output(err)
if base_dir:
filename = os.path.join(base_dir, page.title(as_filename=True))
pywikibot.output(u'Saving %s to %s' % (page.title(), filename))
with open(filename, mode='wb') as f:
f.write(page.text.encode(encoding))
pywikibot.output(u"%i page(s) found" % i)
if page_target:
page_target.text = '\n'.join(output_list)
page_target.save(summary=summary)
return True
else:
pywikibot.bot.suggest_help(missing_generator=True)
return False
if __name__ == "__main__":
main()
|
npdoty/pywikibot
|
scripts/listpages.py
|
Python
|
mit
| 9,907
|
"""Output formatters using shell syntax.
"""
from .base import SingleFormatter
import argparse
import six
class ShellFormatter(SingleFormatter):
def add_argument_group(self, parser):
group = parser.add_argument_group(
title='shell formatter',
description='a format a UNIX shell can parse (variable="value")',
)
group.add_argument(
'--variable',
action='append',
default=[],
dest='variables',
metavar='VARIABLE',
help=argparse.SUPPRESS,
)
group.add_argument(
'--prefix',
action='store',
default='',
dest='prefix',
help='add a prefix to all variable names',
)
def emit_one(self, column_names, data, stdout, parsed_args):
variable_names = [c.lower().replace(' ', '_')
for c in column_names
]
desired_columns = parsed_args.variables
for name, value in zip(variable_names, data):
if name in desired_columns or not desired_columns:
if isinstance(value, six.string_types):
value = value.replace('"', '\\"')
stdout.write('%s%s="%s"\n' % (parsed_args.prefix, name, value))
return
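# Illustrative sketch (not part of cliff): given column_names ('Name', 'Status'),
# data ('demo', 'ACTIVE') and --prefix os_, emit_one writes lines a shell can
# source directly:
#   os_name="demo"
#   os_status="ACTIVE"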
|
sjsucohort6/openstack
|
python/venv/lib/python2.7/site-packages/cliff/formatters/shell.py
|
Python
|
mit
| 1,337
|
from collections import defaultdict, namedtuple
from minecraft_data.v1_8 import recipes as raw_recipes
RecipeItem = namedtuple('RecipeItem', 'id meta amount')
class Recipe(object):
def __init__(self, raw):
self.result = reformat_item(raw['result'], None)
if 'ingredients' in raw:
self.ingredients = [reformat_item(item, 0)
for item in raw['ingredients']]
self.in_shape = None
self.out_shape = None
else:
self.in_shape = reformat_shape(raw['inShape'])
self.out_shape = reformat_shape(raw['outShape']) \
if 'outShape' in raw else None
self.ingredients = [item for row in self.in_shape for item in row]
@property
def total_ingredient_amounts(self):
"""
Returns:
dict: In the form { (item_id, metadata) -> amount }
"""
totals = defaultdict(int)
for id, meta, amount in self.ingredients:
totals[(id, meta)] += amount
return totals
@property
def ingredient_positions(self):
"""
Returns:
dict: In the form { (item_id, metadata) -> [(x, y, amount), ...] }
"""
positions = defaultdict(list)
for y, row in enumerate(self.in_shape):
for x, (item_id, metadata, amount) in enumerate(row):
positions[(item_id, metadata)].append((x, y, amount))
return positions
def reformat_item(raw, default_meta=None):
if isinstance(raw, dict):
raw = raw.copy() # do not modify arg
if 'metadata' not in raw:
raw['metadata'] = default_meta
if 'count' not in raw:
raw['count'] = 1
return RecipeItem(raw['id'], raw['metadata'], raw['count'])
elif isinstance(raw, list):
return RecipeItem(raw[0], raw[1], 1)
else: # single ID or None
return RecipeItem(raw or None, default_meta, 1)
def reformat_shape(shape):
return [[reformat_item(item, None) for item in row] for row in shape]
def iter_recipes(item_id, meta=None):
item_id = str(item_id)
meta = meta and int(meta)
try:
recipes_for_item = raw_recipes[item_id]
except KeyError:
return # no recipe found, do not yield anything
else:
for raw in recipes_for_item:
recipe = Recipe(raw)
if meta is None or meta == recipe.result.meta:
yield recipe
def get_any_recipe(item, meta=None):
# TODO return small recipes if present
for matching in iter_recipes(item, meta):
return matching
return None
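# Hedged usage sketch (not part of SpockBot): the item and block IDs below are
# illustrative 1.8 values and assume minecraft_data has a matching recipe.
#   recipe = get_any_recipe(5)                   # e.g. oak planks
#   if recipe is not None:
#       print(recipe.result)                     # RecipeItem(id=5, meta=..., amount=4)
#       print(recipe.total_ingredient_amounts)   # {(17, 0): 1}, for instance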
|
Gjum/SpockBot
|
spockbot/mcdata/recipes.py
|
Python
|
mit
| 2,639
|
from sqlalchemy.testing import eq_, is_
from sqlalchemy.orm import backref, configure_mappers
from sqlalchemy import testing
from sqlalchemy import desc, select, func, exc
from sqlalchemy.orm import mapper, relationship, create_session, Query, \
attributes, exc as orm_exc, Session
from sqlalchemy.orm.dynamic import AppenderMixin
from sqlalchemy.testing import AssertsCompiledSQL, \
assert_raises_message, assert_raises
from test.orm import _fixtures
from sqlalchemy.testing.assertsql import CompiledSQL
class _DynamicFixture(object):
def _user_address_fixture(self, addresses_args={}):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses': relationship(Address, lazy="dynamic",
**addresses_args)
})
mapper(Address, addresses)
return User, Address
def _order_item_fixture(self, items_args={}):
items, Order, orders, order_items, Item = (self.tables.items,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.Item)
mapper(Order, orders, properties={
'items': relationship(Item,
secondary=order_items,
lazy="dynamic",
**items_args
)
})
mapper(Item, items)
return Order, Item
class DynamicTest(_DynamicFixture, _fixtures.FixtureTest, AssertsCompiledSQL):
def test_basic(self):
User, Address = self._user_address_fixture()
sess = create_session()
q = sess.query(User)
eq_([User(id=7,
addresses=[Address(id=1, email_address='jack@bean.com')])],
q.filter(User.id == 7).all())
eq_(self.static.user_address_result, q.all())
def test_statement(self):
"""test that the .statement accessor returns the actual statement that
would render, without any _clones called."""
User, Address = self._user_address_fixture()
sess = create_session()
q = sess.query(User)
u = q.filter(User.id == 7).first()
self.assert_compile(
u.addresses.statement,
"SELECT addresses.id, addresses.user_id, addresses.email_address "
"FROM "
"addresses WHERE :param_1 = addresses.user_id",
use_default_dialect=True
)
def test_detached_raise(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).get(8)
sess.expunge(u)
assert_raises(
orm_exc.DetachedInstanceError,
u.addresses.filter_by,
email_address='e'
)
def test_no_uselist_false(self):
User, Address = self._user_address_fixture(
addresses_args={"uselist": False})
assert_raises_message(
exc.InvalidRequestError,
"On relationship User.addresses, 'dynamic' loaders cannot be "
"used with many-to-one/one-to-one relationships and/or "
"uselist=False.",
configure_mappers
)
def test_no_m2o(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(Address, addresses, properties={
'user': relationship(User, lazy='dynamic')
})
mapper(User, users)
assert_raises_message(
exc.InvalidRequestError,
"On relationship Address.user, 'dynamic' loaders cannot be "
"used with many-to-one/one-to-one relationships and/or "
"uselist=False.",
configure_mappers
)
def test_order_by(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).get(8)
eq_(
list(u.addresses.order_by(desc(Address.email_address))),
[
Address(email_address='ed@wood.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@bettyboop.com')
]
)
def test_configured_order_by(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={
"order_by":
addresses.c.email_address.desc()})
sess = create_session()
u = sess.query(User).get(8)
eq_(
list(u.addresses),
[
Address(email_address='ed@wood.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@bettyboop.com')
]
)
# test cancellation of None, replacement with something else
eq_(
list(u.addresses.order_by(None).order_by(Address.email_address)),
[
Address(email_address='ed@bettyboop.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@wood.com')
]
)
# test cancellation of None, replacement with nothing
eq_(
set(u.addresses.order_by(None)),
set([
Address(email_address='ed@bettyboop.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@wood.com')
])
)
def test_count(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).first()
eq_(u.addresses.count(), 1)
def test_dynamic_on_backref(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(Address, addresses, properties={
'user': relationship(User,
backref=backref('addresses', lazy='dynamic'))
})
mapper(User, users)
sess = create_session()
ad = sess.query(Address).get(1)
def go():
ad.user = None
self.assert_sql_count(testing.db, go, 0)
sess.flush()
u = sess.query(User).get(7)
assert ad not in u.addresses
def test_no_count(self):
User, Address = self._user_address_fixture()
sess = create_session()
q = sess.query(User)
# dynamic collection cannot implement __len__() (at least one that
# returns a live database result), else additional count() queries are
# issued when evaluating in a list context
def go():
eq_(
q.filter(User.id == 7).all(),
[
User(id=7,
addresses=[
Address(id=1, email_address='jack@bean.com')
])
]
)
self.assert_sql_count(testing.db, go, 2)
def test_no_populate(self):
User, Address = self._user_address_fixture()
u1 = User()
assert_raises_message(
NotImplementedError,
"Dynamic attributes don't support collection population.",
attributes.set_committed_value, u1, 'addresses', []
)
def test_m2m(self):
Order, Item = self._order_item_fixture(items_args={
"backref": backref("orders", lazy="dynamic")
})
sess = create_session()
o1 = Order(id=15, description="order 10")
i1 = Item(id=10, description="item 8")
o1.items.append(i1)
sess.add(o1)
sess.flush()
assert o1 in i1.orders.all()
assert i1 in o1.items.all()
@testing.exclude('mysql', 'between',
((5, 1, 49), (5, 1, 52)),
'https://bugs.launchpad.net/ubuntu/+source/mysql-5.1/+bug/706988')
def test_association_nonaliased(self):
items, Order, orders, order_items, Item = (self.tables.items,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.Item)
mapper(Order, orders, properties={
'items': relationship(Item,
secondary=order_items,
lazy="dynamic",
order_by=order_items.c.item_id)
})
mapper(Item, items)
sess = create_session()
o = sess.query(Order).first()
self.assert_compile(
o.items,
"SELECT items.id AS items_id, items.description AS "
"items_description FROM items,"
" order_items WHERE :param_1 = order_items.order_id AND "
"items.id = order_items.item_id"
" ORDER BY order_items.item_id",
use_default_dialect=True
)
# filter criterion against the secondary table
# works
eq_(
o.items.filter(order_items.c.item_id == 2).all(),
[Item(id=2)]
)
def test_transient_count(self):
User, Address = self._user_address_fixture()
u1 = User()
u1.addresses.append(Address())
eq_(u1.addresses.count(), 1)
def test_transient_access(self):
User, Address = self._user_address_fixture()
u1 = User()
u1.addresses.append(Address())
eq_(u1.addresses[0], Address())
def test_custom_query(self):
class MyQuery(Query):
pass
User, Address = self._user_address_fixture(
addresses_args={"query_class": MyQuery})
sess = create_session()
u = User()
sess.add(u)
col = u.addresses
assert isinstance(col, Query)
assert isinstance(col, MyQuery)
assert hasattr(col, 'append')
eq_(type(col).__name__, 'AppenderMyQuery')
q = col.limit(1)
assert isinstance(q, Query)
assert isinstance(q, MyQuery)
assert not hasattr(q, 'append')
eq_(type(q).__name__, 'MyQuery')
def test_custom_query_with_custom_mixin(self):
class MyAppenderMixin(AppenderMixin):
def add(self, items):
if isinstance(items, list):
for item in items:
self.append(item)
else:
self.append(items)
class MyQuery(Query):
pass
class MyAppenderQuery(MyAppenderMixin, MyQuery):
query_class = MyQuery
User, Address = self._user_address_fixture(
addresses_args={
"query_class": MyAppenderQuery})
sess = create_session()
u = User()
sess.add(u)
col = u.addresses
assert isinstance(col, Query)
assert isinstance(col, MyQuery)
assert hasattr(col, 'append')
assert hasattr(col, 'add')
eq_(type(col).__name__, 'MyAppenderQuery')
q = col.limit(1)
assert isinstance(q, Query)
assert isinstance(q, MyQuery)
assert not hasattr(q, 'append')
assert not hasattr(q, 'add')
eq_(type(q).__name__, 'MyQuery')
class UOWTest(_DynamicFixture, _fixtures.FixtureTest,
testing.AssertsExecutionResults):
run_inserts = None
def test_persistence(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture()
sess = create_session()
u1 = User(name='jack')
a1 = Address(email_address='foo')
sess.add_all([u1, a1])
sess.flush()
eq_(
testing.db.scalar(
select([func.count(1)]).where(addresses.c.user_id != None)
),
0
)
u1 = sess.query(User).get(u1.id)
u1.addresses.append(a1)
sess.flush()
eq_(
testing.db.execute(
select([addresses]).where(addresses.c.user_id != None)
).fetchall(),
[(a1.id, u1.id, 'foo')]
)
u1.addresses.remove(a1)
sess.flush()
eq_(
testing.db.scalar(
select([func.count(1)]).where(addresses.c.user_id != None)
),
0
)
u1.addresses.append(a1)
sess.flush()
eq_(
testing.db.execute(
select([addresses]).where(addresses.c.user_id != None)
).fetchall(),
[(a1.id, u1.id, 'foo')]
)
a2 = Address(email_address='bar')
u1.addresses.remove(a1)
u1.addresses.append(a2)
sess.flush()
eq_(
testing.db.execute(
select([addresses]).where(addresses.c.user_id != None)
).fetchall(),
[(a2.id, u1.id, 'bar')]
)
def test_merge(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={
"order_by": addresses.c.email_address})
sess = create_session()
u1 = User(name='jack')
a1 = Address(email_address='a1')
a2 = Address(email_address='a2')
a3 = Address(email_address='a3')
u1.addresses.append(a2)
u1.addresses.append(a3)
sess.add_all([u1, a1])
sess.flush()
u1 = User(id=u1.id, name='jack')
u1.addresses.append(a1)
u1.addresses.append(a3)
u1 = sess.merge(u1)
eq_(attributes.get_history(u1, 'addresses'), (
[a1],
[a3],
[a2]
))
sess.flush()
eq_(
list(u1.addresses),
[a1, a3]
)
def test_hasattr(self):
User, Address = self._user_address_fixture()
u1 = User(name='jack')
assert 'addresses' not in u1.__dict__
u1.addresses = [Address(email_address='test')]
assert 'addresses' in u1.__dict__
def test_collection_set(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={
"order_by": addresses.c.email_address})
sess = create_session(autoflush=True, autocommit=False)
u1 = User(name='jack')
a1 = Address(email_address='a1')
a2 = Address(email_address='a2')
a3 = Address(email_address='a3')
a4 = Address(email_address='a4')
sess.add(u1)
u1.addresses = [a1, a3]
eq_(list(u1.addresses), [a1, a3])
u1.addresses = [a1, a2, a4]
eq_(list(u1.addresses), [a1, a2, a4])
u1.addresses = [a2, a3]
eq_(list(u1.addresses), [a2, a3])
u1.addresses = []
eq_(list(u1.addresses), [])
def test_noload_append(self):
# test that a load of User.addresses is not emitted
# when flushing an append
User, Address = self._user_address_fixture()
sess = Session()
u1 = User(name="jack", addresses=[Address(email_address="a1")])
sess.add(u1)
sess.commit()
u1_id = u1.id
sess.expire_all()
u1.addresses.append(Address(email_address='a2'))
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1",
lambda ctx: [{"param_1": u1_id}]),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
lambda ctx: [{'email_address': 'a2', 'user_id': u1_id}]
)
)
def test_noload_remove(self):
# test that a load of User.addresses is not emitted
# when flushing a remove
User, Address = self._user_address_fixture()
sess = Session()
u1 = User(name="jack", addresses=[Address(email_address="a1")])
a2 = Address(email_address='a2')
u1.addresses.append(a2)
sess.add(u1)
sess.commit()
u1_id = u1.id
a2_id = a2.id
sess.expire_all()
u1.addresses.remove(a2)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1",
lambda ctx: [{"param_1": u1_id}]),
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.email_address "
"AS addresses_email_address FROM addresses "
"WHERE addresses.id = :param_1",
lambda ctx: [{'param_1': a2_id}]
),
CompiledSQL(
"UPDATE addresses SET user_id=:user_id WHERE addresses.id = "
":addresses_id",
lambda ctx: [{'addresses_id': a2_id, 'user_id': None}]
)
)
def test_rollback(self):
User, Address = self._user_address_fixture()
sess = create_session(
expire_on_commit=False, autocommit=False, autoflush=True)
u1 = User(name='jack')
u1.addresses.append(Address(email_address='lala@hoho.com'))
sess.add(u1)
sess.flush()
sess.commit()
u1.addresses.append(Address(email_address='foo@bar.com'))
eq_(
u1.addresses.order_by(Address.id).all(),
[
Address(email_address='lala@hoho.com'),
Address(email_address='foo@bar.com')
]
)
sess.rollback()
eq_(
u1.addresses.all(),
[Address(email_address='lala@hoho.com')]
)
def _test_delete_cascade(self, expected):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(addresses_args={
"order_by": addresses.c.id,
"backref": "user",
"cascade": "save-update" if expected \
else "all, delete"
})
sess = create_session(autoflush=True, autocommit=False)
u = User(name='ed')
u.addresses.extend(
[Address(email_address=letter) for letter in 'abcdef']
)
sess.add(u)
sess.commit()
eq_(testing.db.scalar(
addresses.count(addresses.c.user_id == None)), 0)
eq_(testing.db.scalar(
addresses.count(addresses.c.user_id != None)), 6)
sess.delete(u)
sess.commit()
if expected:
eq_(testing.db.scalar(
addresses.count(addresses.c.user_id == None)), 6)
eq_(testing.db.scalar(
addresses.count(addresses.c.user_id != None)), 0)
else:
eq_(testing.db.scalar(addresses.count()), 0)
def test_delete_nocascade(self):
self._test_delete_cascade(True)
def test_delete_cascade(self):
self._test_delete_cascade(False)
def test_remove_orphans(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(addresses_args={
"order_by": addresses.c.id,
"backref": "user",
"cascade": "all, delete-orphan"
})
sess = create_session(autoflush=True, autocommit=False)
u = User(name='ed')
u.addresses.extend(
[Address(email_address=letter) for letter in 'abcdef']
)
sess.add(u)
for a in u.addresses.filter(
Address.email_address.in_(['c', 'e', 'f'])):
u.addresses.remove(a)
eq_(
set(ad for ad, in sess.query(Address.email_address)),
set(['a', 'b', 'd'])
)
def _backref_test(self, autoflush, saveuser):
User, Address = self._user_address_fixture(addresses_args={
"backref": "user",
})
sess = create_session(autoflush=autoflush, autocommit=False)
u = User(name='buffy')
a = Address(email_address='foo@bar.com')
a.user = u
if saveuser:
sess.add(u)
else:
sess.add(a)
if not autoflush:
sess.flush()
assert u in sess
assert a in sess
eq_(list(u.addresses), [a])
a.user = None
if not autoflush:
eq_(list(u.addresses), [a])
if not autoflush:
sess.flush()
eq_(list(u.addresses), [])
def test_backref_autoflush_saveuser(self):
self._backref_test(True, True)
def test_backref_autoflush_savead(self):
self._backref_test(True, False)
def test_backref_saveuser(self):
self._backref_test(False, True)
def test_backref_savead(self):
self._backref_test(False, False)
def test_backref_events(self):
User, Address = self._user_address_fixture(addresses_args={
"backref": "user",
})
u1 = User()
a1 = Address()
u1.addresses.append(a1)
is_(a1.user, u1)
def test_no_deref(self):
User, Address = self._user_address_fixture(addresses_args={
"backref": "user",
})
session = create_session()
user = User()
user.name = 'joe'
user.fullname = 'Joe User'
user.password = 'Joe\'s secret'
address = Address()
address.email_address = 'joe@joesdomain.example'
address.user = user
session.add(user)
session.flush()
session.expunge_all()
def query1():
session = create_session(testing.db)
user = session.query(User).first()
return user.addresses.all()
def query2():
session = create_session(testing.db)
return session.query(User).first().addresses.all()
def query3():
session = create_session(testing.db)
user = session.query(User).first()
return session.query(User).first().addresses.all()
eq_(query1(), [Address(email_address='joe@joesdomain.example')])
eq_(query2(), [Address(email_address='joe@joesdomain.example')])
eq_(query3(), [Address(email_address='joe@joesdomain.example')])
class HistoryTest(_DynamicFixture, _fixtures.FixtureTest):
run_inserts = None
def _transient_fixture(self, addresses_args={}):
User, Address = self._user_address_fixture(
addresses_args=addresses_args)
u1 = User()
a1 = Address()
return u1, a1
def _persistent_fixture(self, autoflush=True, addresses_args={}):
User, Address = self._user_address_fixture(
addresses_args=addresses_args)
u1 = User(name='u1')
a1 = Address(email_address='a1')
s = Session(autoflush=autoflush)
s.add(u1)
s.flush()
return u1, a1, s
def _persistent_m2m_fixture(self, autoflush=True, items_args={}):
Order, Item = self._order_item_fixture(items_args=items_args)
o1 = Order()
i1 = Item(description="i1")
s = Session(autoflush=autoflush)
s.add(o1)
s.flush()
return o1, i1, s
def _assert_history(self, obj, compare, compare_passive=None):
if isinstance(obj, self.classes.User):
attrname = "addresses"
elif isinstance(obj, self.classes.Order):
attrname = "items"
eq_(
attributes.get_history(obj, attrname),
compare
)
if compare_passive is None:
compare_passive = compare
eq_(
attributes.get_history(obj, attrname,
attributes.LOAD_AGAINST_COMMITTED),
compare_passive
)
def test_append_transient(self):
u1, a1 = self._transient_fixture()
u1.addresses.append(a1)
self._assert_history(u1,
([a1], [], [])
)
def test_append_persistent(self):
u1, a1, s = self._persistent_fixture()
u1.addresses.append(a1)
self._assert_history(u1,
([a1], [], [])
)
def test_remove_transient(self):
u1, a1 = self._transient_fixture()
u1.addresses.append(a1)
u1.addresses.remove(a1)
self._assert_history(u1,
([], [], [])
)
def test_backref_pop_transient(self):
u1, a1 = self._transient_fixture(addresses_args={"backref": "user"})
u1.addresses.append(a1)
self._assert_history(u1,
([a1], [], []),
)
a1.user = None
# removed from added
self._assert_history(u1,
([], [], []),
)
def test_remove_persistent(self):
u1, a1, s = self._persistent_fixture()
u1.addresses.append(a1)
s.flush()
s.expire_all()
u1.addresses.remove(a1)
self._assert_history(u1,
([], [], [a1])
)
def test_backref_pop_persistent_autoflush_o2m_active_hist(self):
u1, a1, s = self._persistent_fixture(
addresses_args={"backref":
backref("user", active_history=True)})
u1.addresses.append(a1)
s.flush()
s.expire_all()
a1.user = None
self._assert_history(u1,
([], [], [a1]),
)
def test_backref_pop_persistent_autoflush_m2m(self):
o1, i1, s = self._persistent_m2m_fixture(
items_args={"backref": "orders"})
o1.items.append(i1)
s.flush()
s.expire_all()
i1.orders.remove(o1)
self._assert_history(o1,
([], [], [i1]),
)
def test_backref_pop_persistent_noflush_m2m(self):
o1, i1, s = self._persistent_m2m_fixture(
items_args={"backref": "orders"}, autoflush=False)
o1.items.append(i1)
s.flush()
s.expire_all()
i1.orders.remove(o1)
self._assert_history(o1,
([], [], [i1]),
)
def test_unchanged_persistent(self):
Address = self.classes.Address
u1, a1, s = self._persistent_fixture()
a2, a3 = Address(email_address='a2'), Address(email_address='a3')
u1.addresses.append(a1)
u1.addresses.append(a2)
s.flush()
u1.addresses.append(a3)
u1.addresses.remove(a2)
self._assert_history(u1,
([a3], [a1], [a2]),
compare_passive=([a3], [], [a2])
)
def test_replace_transient(self):
Address = self.classes.Address
u1, a1 = self._transient_fixture()
a2, a3, a4, a5 = Address(email_address='a2'), \
Address(email_address='a3'), \
Address(email_address='a4'), \
Address(email_address='a5')
u1.addresses = [a1, a2]
u1.addresses = [a2, a3, a4, a5]
self._assert_history(u1,
([a2, a3, a4, a5], [], [])
)
def test_replace_persistent_noflush(self):
Address = self.classes.Address
u1, a1, s = self._persistent_fixture(autoflush=False)
a2, a3, a4, a5 = Address(email_address='a2'), \
Address(email_address='a3'), \
Address(email_address='a4'), \
Address(email_address='a5')
u1.addresses = [a1, a2]
u1.addresses = [a2, a3, a4, a5]
self._assert_history(u1,
([a2, a3, a4, a5], [], [])
)
def test_replace_persistent_autoflush(self):
Address = self.classes.Address
u1, a1, s = self._persistent_fixture(autoflush=True)
a2, a3, a4, a5 = Address(email_address='a2'), \
Address(email_address='a3'), \
Address(email_address='a4'), \
Address(email_address='a5')
u1.addresses = [a1, a2]
u1.addresses = [a2, a3, a4, a5]
self._assert_history(u1,
([a3, a4, a5], [a2], [a1]),
compare_passive=([a3, a4, a5], [], [a1])
)
def test_persistent_but_readded_noflush(self):
u1, a1, s = self._persistent_fixture(autoflush=False)
u1.addresses.append(a1)
s.flush()
u1.addresses.append(a1)
self._assert_history(u1,
([], [a1], []),
compare_passive=([a1], [], [])
)
def test_persistent_but_readded_autoflush(self):
u1, a1, s = self._persistent_fixture(autoflush=True)
u1.addresses.append(a1)
s.flush()
u1.addresses.append(a1)
self._assert_history(u1,
([], [a1], []),
compare_passive=([a1], [], [])
)
def test_missing_but_removed_noflush(self):
u1, a1, s = self._persistent_fixture(autoflush=False)
u1.addresses.remove(a1)
self._assert_history(u1,
([], [], []),
compare_passive=([], [], [a1])
)
|
mitsuhiko/sqlalchemy
|
test/orm/test_dynamic.py
|
Python
|
mit
| 30,088
|
# -*- coding: utf-8 -*-
# Adapted from a contribution of Johan Dahlin
import collections
import errno
import re
import sys
try:
import multiprocessing
except ImportError: # Python 2.5
multiprocessing = None
import pep8
__all__ = ['multiprocessing', 'BaseQReport', 'QueueReport']
class BaseQReport(pep8.BaseReport):
"""Base Queue Report."""
_loaded = False # Windows support
# Reasoning for ignored error numbers is in-line below
ignored_errors = set([
# EPIPE: Added by sigmavirus24
# > If output during processing is piped to something that may close
# > its own stdin before we've finished printing results, we need to
# > catch a Broken pipe error and continue on.
# > (See also: https://gitlab.com/pycqa/flake8/issues/69)
errno.EPIPE,
# NOTE(sigmavirus24): When adding to this list, include the reasoning
# on the lines before the error code and always append your error
# code. Further, please always add a trailing `,` to reduce the visual
# noise in diffs.
])
def __init__(self, options):
assert options.jobs > 0
super(BaseQReport, self).__init__(options)
self.counters = collections.defaultdict(int)
self.n_jobs = options.jobs
# init queues
self.task_queue = multiprocessing.Queue()
self.result_queue = multiprocessing.Queue()
if sys.platform == 'win32':
# Work around http://bugs.python.org/issue10845
sys.modules['__main__'].__file__ = __file__
def _cleanup_queue(self, queue):
while not queue.empty():
queue.get_nowait()
def _put_done(self):
# collect queues
for i in range(self.n_jobs):
self.task_queue.put('DONE')
self.update_state(self.result_queue.get())
def _process_main(self):
if not self._loaded:
# Windows needs to parse again the configuration
from flake8.main import get_style_guide, DEFAULT_CONFIG
get_style_guide(parse_argv=True, config_file=DEFAULT_CONFIG)
for filename in iter(self.task_queue.get, 'DONE'):
self.input_file(filename)
def start(self):
super(BaseQReport, self).start()
self.__class__._loaded = True
# spawn processes
for i in range(self.n_jobs):
p = multiprocessing.Process(target=self.process_main)
p.daemon = True
p.start()
def stop(self):
try:
self._put_done()
except KeyboardInterrupt:
pass
finally:
# cleanup queues to unlock threads
self._cleanup_queue(self.result_queue)
self._cleanup_queue(self.task_queue)
super(BaseQReport, self).stop()
def process_main(self):
try:
self._process_main()
except KeyboardInterrupt:
pass
except IOError as ioerr:
# If we happen across an IOError that we aren't certain can/should
# be ignored, we should re-raise the exception.
if ioerr.errno not in self.ignored_errors:
raise
finally:
# ensure all output is flushed before main process continues
sys.stdout.flush()
sys.stderr.flush()
self.result_queue.put(self.get_state())
def get_state(self):
return {'total_errors': self.total_errors,
'counters': self.counters,
'messages': self.messages}
def update_state(self, state):
self.total_errors += state['total_errors']
for key, value in state['counters'].items():
self.counters[key] += value
self.messages.update(state['messages'])
class FileQReport(BaseQReport):
"""File Queue Report."""
print_filename = True
class QueueReport(pep8.StandardReport, BaseQReport):
"""Standard Queue Report."""
def get_file_results(self):
"""Print the result and return the overall count for this file."""
self._deferred_print.sort()
for line_number, offset, code, text, doc in self._deferred_print:
print(self._fmt % {
'path': self.filename,
'row': self.line_offset + line_number, 'col': offset + 1,
'code': code, 'text': text,
})
            # stdout is block buffered when not stdout.isatty(), so a line can
            # be split at a buffer boundary while other processes write to the
            # same stream. Calling flush() after print() avoids breaking lines
            # at a buffer boundary; the typical buffer size is 8192, so a line
            # is written safely when len(line) < 8192.
sys.stdout.flush()
if self._show_source:
if line_number > len(self.lines):
line = ''
else:
line = self.lines[line_number - 1]
print(line.rstrip())
sys.stdout.flush()
print(re.sub(r'\S', ' ', line[:offset]) + '^')
sys.stdout.flush()
if self._show_pep8 and doc:
print(' ' + doc.strip())
sys.stdout.flush()
return self.file_errors
|
wdv4758h/flake8
|
flake8/reporter.py
|
Python
|
mit
| 5,265
|
import argparse
import copy
import numpy as np
import chainer
from chainer.datasets import ConcatenatedDataset
from chainer.datasets import TransformDataset
from chainer.optimizer_hooks import WeightDecay
from chainer import serializers
from chainer import training
from chainer.training import extensions
from chainer.training import triggers
from chainercv.datasets import voc_bbox_label_names
from chainercv.datasets import VOCBboxDataset
from chainercv.extensions import DetectionVOCEvaluator
from chainercv.links.model.ssd import GradientScaling
from chainercv.links.model.ssd import multibox_loss
from chainercv.links import SSD300
from chainercv.links import SSD512
from chainercv import transforms
from chainercv.links.model.ssd import random_crop_with_bbox_constraints
from chainercv.links.model.ssd import random_distort
from chainercv.links.model.ssd import resize_with_random_interpolation
# https://docs.chainer.org/en/stable/tips.html#my-training-process-gets-stuck-when-using-multiprocessiterator
import cv2
cv2.setNumThreads(0)
class MultiboxTrainChain(chainer.Chain):
def __init__(self, model, alpha=1, k=3):
super(MultiboxTrainChain, self).__init__()
with self.init_scope():
self.model = model
self.alpha = alpha
self.k = k
def forward(self, imgs, gt_mb_locs, gt_mb_labels):
mb_locs, mb_confs = self.model(imgs)
loc_loss, conf_loss = multibox_loss(
mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, self.k)
loss = loc_loss * self.alpha + conf_loss
chainer.reporter.report(
{'loss': loss, 'loss/loc': loc_loss, 'loss/conf': conf_loss},
self)
return loss
class Transform(object):
def __init__(self, coder, size, mean):
        # copy the coder and move it to the CPU so it can be sent to worker processes
self.coder = copy.copy(coder)
self.coder.to_cpu()
self.size = size
self.mean = mean
def __call__(self, in_data):
# There are five data augmentation steps
# 1. Color augmentation
# 2. Random expansion
# 3. Random cropping
# 4. Resizing with random interpolation
# 5. Random horizontal flipping
img, bbox, label = in_data
# 1. Color augmentation
img = random_distort(img)
# 2. Random expansion
if np.random.randint(2):
img, param = transforms.random_expand(
img, fill=self.mean, return_param=True)
bbox = transforms.translate_bbox(
bbox, y_offset=param['y_offset'], x_offset=param['x_offset'])
# 3. Random cropping
img, param = random_crop_with_bbox_constraints(
img, bbox, return_param=True)
bbox, param = transforms.crop_bbox(
bbox, y_slice=param['y_slice'], x_slice=param['x_slice'],
allow_outside_center=False, return_param=True)
label = label[param['index']]
        # 4. Resizing with random interpolation
_, H, W = img.shape
img = resize_with_random_interpolation(img, (self.size, self.size))
bbox = transforms.resize_bbox(bbox, (H, W), (self.size, self.size))
# 5. Random horizontal flipping
img, params = transforms.random_flip(
img, x_random=True, return_param=True)
bbox = transforms.flip_bbox(
bbox, (self.size, self.size), x_flip=params['x_flip'])
# Preparation for SSD network
img -= self.mean
mb_loc, mb_label = self.coder.encode(bbox, label)
return img, mb_loc, mb_label
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model', choices=('ssd300', 'ssd512'), default='ssd300')
parser.add_argument('--batchsize', type=int, default=32)
parser.add_argument('--iteration', type=int, default=120000)
parser.add_argument('--step', type=int, nargs='*', default=[80000, 100000])
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--out', default='result')
parser.add_argument('--resume')
args = parser.parse_args()
if args.model == 'ssd300':
model = SSD300(
n_fg_class=len(voc_bbox_label_names),
pretrained_model='imagenet')
elif args.model == 'ssd512':
model = SSD512(
n_fg_class=len(voc_bbox_label_names),
pretrained_model='imagenet')
model.use_preset('evaluate')
train_chain = MultiboxTrainChain(model)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
train = TransformDataset(
ConcatenatedDataset(
VOCBboxDataset(year='2007', split='trainval'),
VOCBboxDataset(year='2012', split='trainval')
),
Transform(model.coder, model.insize, model.mean))
train_iter = chainer.iterators.MultiprocessIterator(train, args.batchsize)
test = VOCBboxDataset(
year='2007', split='test',
use_difficult=True, return_difficult=True)
test_iter = chainer.iterators.SerialIterator(
test, args.batchsize, repeat=False, shuffle=False)
# initial lr is set to 1e-3 by ExponentialShift
optimizer = chainer.optimizers.MomentumSGD()
optimizer.setup(train_chain)
for param in train_chain.params():
if param.name == 'b':
param.update_rule.add_hook(GradientScaling(2))
else:
param.update_rule.add_hook(WeightDecay(0.0005))
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=args.gpu)
trainer = training.Trainer(
updater, (args.iteration, 'iteration'), args.out)
trainer.extend(
extensions.ExponentialShift('lr', 0.1, init=1e-3),
trigger=triggers.ManualScheduleTrigger(args.step, 'iteration'))
trainer.extend(
DetectionVOCEvaluator(
test_iter, model, use_07_metric=True,
label_names=voc_bbox_label_names),
trigger=triggers.ManualScheduleTrigger(
args.step + [args.iteration], 'iteration'))
log_interval = 10, 'iteration'
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.observe_lr(), trigger=log_interval)
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'lr',
'main/loss', 'main/loss/loc', 'main/loss/conf',
'validation/main/map']),
trigger=log_interval)
trainer.extend(extensions.ProgressBar(update_interval=10))
trainer.extend(
extensions.snapshot(),
trigger=triggers.ManualScheduleTrigger(
args.step + [args.iteration], 'iteration'))
trainer.extend(
extensions.snapshot_object(model, 'model_iter_{.updater.iteration}'),
trigger=(args.iteration, 'iteration'))
if args.resume:
serializers.load_npz(args.resume, trainer)
trainer.run()
if __name__ == '__main__':
main()
|
chainer/chainercv
|
examples/ssd/train.py
|
Python
|
mit
| 6,895
|
"""
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
import logging
import re
import requests
from utilities import f2i, h2f
from rpc_api import RpcApi
from auth_ptc import AuthPtc
from auth_google import AuthGoogle
from exceptions import AuthException, NotLoggedInException, ServerBusyOrOfflineException
import protos.RpcEnum_pb2 as RpcEnum
logger = logging.getLogger(__name__)
class PGoApi:
API_ENTRY = 'https://pgorelease.nianticlabs.com/plfe/rpc'
def __init__(self):
self.log = logging.getLogger(__name__)
self._auth_provider = None
self._api_endpoint = None
self._position_lat = 0
self._position_lng = 0
self._position_alt = 0
self._req_method_list = []
def call(self):
if not self._req_method_list:
return False
if self._auth_provider is None or not self._auth_provider.is_login():
self.log.info('Not logged in')
return False
player_position = self.get_position()
request = RpcApi(self._auth_provider)
if self._api_endpoint:
api_endpoint = self._api_endpoint
else:
api_endpoint = self.API_ENTRY
self.log.info('Execution of RPC')
response = None
try:
response = request.request(api_endpoint, self._req_method_list, player_position)
except ServerBusyOrOfflineException as e:
self.log.info('Server seems to be busy or offline - try again!')
# cleanup after call execution
self.log.info('Cleanup of request!')
self._req_method_list = []
return response
#def get_player(self):
def list_curr_methods(self):
for i in self._req_method_list:
print("{} ({})".format(RpcEnum.RequestMethod.Name(i),i))
def set_logger(self, logger):
        self.log = logger or logging.getLogger(__name__)
def get_position(self):
return (self._position_lat, self._position_lng, self._position_alt)
def set_position(self, lat, lng, alt):
self.log.debug('Set Position - Lat: %s Long: %s Alt: %s', lat, lng, alt)
self._position_lat = f2i(lat)
self._position_lng = f2i(lng)
self._position_alt = f2i(alt)
def __getattr__(self, func):
def function(**kwargs):
if not self._req_method_list:
self.log.info('Create new request...')
name = func.upper()
if kwargs:
self._req_method_list.append( { RpcEnum.RequestMethod.Value(name): kwargs } )
self.log.info("Adding '%s' to RPC request including arguments", name)
self.log.debug("Arguments of '%s': \n\r%s", name, kwargs)
else:
self._req_method_list.append( RpcEnum.RequestMethod.Value(name) )
self.log.info("Adding '%s' to RPC request", name)
return self
if func.upper() in RpcEnum.RequestMethod.keys():
return function
else:
raise AttributeError
def login(self, provider, username, password):
if not isinstance(username, basestring) or not isinstance(password, basestring):
raise AuthException("Username/password not correctly specified")
if provider == 'ptc':
self._auth_provider = AuthPtc()
elif provider == 'google':
self._auth_provider = AuthGoogle()
else:
raise AuthException("Invalid authentication provider - only ptc/google available.")
self.log.debug('Auth provider: %s', provider)
if not self._auth_provider.login(username, password):
self.log.info('Login process failed')
return False
self.log.info('Starting RPC login sequence (app simulation)')
# making a standard call, like it is also done by the client
self.get_player()
self.get_hatched_eggs()
self.get_inventory()
self.check_awarded_badges()
self.download_settings(hash="4a2e9bc330dae60e7b74fc85b98868ab4700802e")
response = self.call()
if not response:
self.log.info('Login failed!')
return False
if 'api_url' in response:
self._api_endpoint = ('https://{}/rpc'.format(response['api_url']))
self.log.debug('Setting API endpoint to: %s', self._api_endpoint)
else:
self.log.error('Login failed - unexpected server response!')
return False
if 'auth_ticket' in response:
self._auth_provider.set_ticket(response['auth_ticket'].values())
self.log.info('Finished RPC login sequence (app simulation)')
self.log.info('Login process completed')
return True
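# Hedged usage sketch (not part of the original module): a hypothetical call
# sequence based on the methods above; credentials and coordinates are placeholders.
#   api = PGoApi()
#   api.set_position(37.7749, -122.4194, 0.0)
#   if api.login('ptc', 'username', 'password'):
#       api.get_player().get_inventory()   # queued via __getattr__
#       response = api.call()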
|
charbec1/pokemapfuntimesyay
|
pogom/pgoapi/pgoapi.py
|
Python
|
mit
| 6,103
|
from __future__ import division
import numpy as np
from chainer.backends import cuda
import chainer.functions as F
from chainercv import transforms
exp_clip = np.log(1000 / 16)
def smooth_l1(x, t, beta):
return F.huber_loss(x, t, beta, reduce='no') / beta
# to avoid out of memory
def argsort(x):
xp = cuda.get_array_module(x)
i = np.argsort(cuda.to_cpu(x))
if xp is np:
return i
else:
return cuda.to_gpu(i)
# to avoid out of memory
def choice(x, size):
xp = cuda.get_array_module(x)
y = np.random.choice(cuda.to_cpu(x), size, replace=False)
if xp is np:
return y
else:
return cuda.to_gpu(y)
def scale_img(img, min_size, max_size):
"""Process image."""
_, H, W = img.shape
scale = min_size / min(H, W)
if scale * max(H, W) > max_size:
scale = max_size / max(H, W)
H, W = int(H * scale), int(W * scale)
img = transforms.resize(img, (H, W))
return img, scale
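# Worked example (added for illustration, not in the original module): with
# min_size=800 and max_size=1333, scale_img turns a 480x640 image into 800x1066
# (scale = 800 / 480), while a 240x1280 image is capped by its longer side and
# becomes 249x1333 (scale = 1333 / 1280).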
|
chainer/chainercv
|
chainercv/links/model/fpn/misc.py
|
Python
|
mit
| 974
|
"""
PageLayout
==========
.. image:: images/pagelayout.gif
:align: right
The :class:`PageLayout` class is used to create a simple multi-page
layout, in a way that allows easy flipping from one page to another using
borders.
:class:`PageLayout` does not currently honor the
:attr:`~kivy.uix.widget.Widget.size_hint`,
:attr:`~kivy.uix.widget.Widget.size_hint_min`,
:attr:`~kivy.uix.widget.Widget.size_hint_max`, or
:attr:`~kivy.uix.widget.Widget.pos_hint` properties.
.. versionadded:: 1.8.0
Example:
.. code-block:: kv
PageLayout:
Button:
text: 'page1'
Button:
text: 'page2'
Button:
text: 'page3'
Transitions from one page to the next are made by swiping in from the border
areas on the right or left hand side. If you wish to display multiple widgets
in a page, we suggest you use a containing layout. Ideally, each page should
consist of a single :mod:`~kivy.uix.layout` widget that contains the remaining
widgets on that page.
"""
__all__ = ('PageLayout', )
from kivy.uix.layout import Layout
from kivy.properties import NumericProperty, DictProperty
from kivy.animation import Animation
class PageLayout(Layout):
'''PageLayout class. See module documentation for more information.
'''
page = NumericProperty(0)
'''The currently displayed page.
:data:`page` is a :class:`~kivy.properties.NumericProperty` and defaults
to 0.
'''
border = NumericProperty('50dp')
'''The width of the border around the current page used to display
the previous/next page swipe areas when needed.
:data:`border` is a :class:`~kivy.properties.NumericProperty` and
defaults to 50dp.
'''
swipe_threshold = NumericProperty(.5)
'''The threshold used to trigger swipes as ratio of the widget
size.
:data:`swipe_threshold` is a :class:`~kivy.properties.NumericProperty`
and defaults to .5.
'''
anim_kwargs = DictProperty({'d': .5, 't': 'in_quad'})
'''The animation kwargs used to construct the animation
:data:`anim_kwargs` is a :class:`~kivy.properties.DictProperty`
and defaults to {'d': .5, 't': 'in_quad'}.
.. versionadded:: 1.11.0
'''
def __init__(self, **kwargs):
super(PageLayout, self).__init__(**kwargs)
trigger = self._trigger_layout
fbind = self.fbind
fbind('border', trigger)
fbind('page', trigger)
fbind('parent', trigger)
fbind('children', trigger)
fbind('size', trigger)
fbind('pos', trigger)
def do_layout(self, *largs):
l_children = len(self.children) - 1
h = self.height
x_parent, y_parent = self.pos
p = self.page
border = self.border
half_border = border / 2.
right = self.right
width = self.width - border
for i, c in enumerate(reversed(self.children)):
if i < p:
x = x_parent
elif i == p:
if not p: # it's first page
x = x_parent
elif p != l_children: # not first, but there are post pages
x = x_parent + half_border
else: # not first and there are no post pages
x = x_parent + border
elif i == p + 1:
if not p: # second page - no left margin
x = right - border
else: # there's already a left margin
x = right - half_border
else:
x = right
c.height = h
c.width = width
Animation(
x=x,
y=y_parent,
**self.anim_kwargs).start(c)
def on_touch_down(self, touch):
if (
self.disabled or
not self.collide_point(*touch.pos) or
not self.children
):
return
page = self.children[-self.page - 1]
if self.x <= touch.x < page.x:
touch.ud['page'] = 'previous'
touch.grab(self)
return True
elif page.right <= touch.x < self.right:
touch.ud['page'] = 'next'
touch.grab(self)
return True
return page.on_touch_down(touch)
def on_touch_move(self, touch):
if touch.grab_current != self:
return
p = self.page
border = self.border
half_border = border / 2.
page = self.children[-p - 1]
if touch.ud['page'] == 'previous':
# move next page up to right edge
if p < len(self.children) - 1:
self.children[-p - 2].x = min(
self.right - self.border * (1 - (touch.sx - touch.osx)),
self.right)
# move current page until edge hits the right border
if p >= 1:
b_right = half_border if p > 1 else border
b_left = half_border if p < len(self.children) - 1 else border
self.children[-p - 1].x = max(min(
self.x + b_left + (touch.x - touch.ox),
self.right - b_right),
self.x + b_left)
# move previous page left edge up to left border
if p > 1:
self.children[-p].x = min(
self.x + half_border * (touch.sx - touch.osx),
self.x + half_border)
elif touch.ud['page'] == 'next':
# move current page up to left edge
if p >= 1:
self.children[-p - 1].x = max(
self.x + half_border * (1 - (touch.osx - touch.sx)),
self.x)
            # move next page until its edge hits the left border
if p < len(self.children) - 1:
b_right = half_border if p >= 1 else border
b_left = half_border if p < len(self.children) - 2 else border
self.children[-p - 2].x = min(max(
self.right - b_right + (touch.x - touch.ox),
self.x + b_left),
self.right - b_right)
# move second next page up to right border
if p < len(self.children) - 2:
self.children[-p - 3].x = max(
self.right + half_border * (touch.sx - touch.osx),
self.right - half_border)
return page.on_touch_move(touch)
def on_touch_up(self, touch):
if touch.grab_current == self:
if (
touch.ud['page'] == 'previous' and
abs(touch.x - touch.ox) / self.width > self.swipe_threshold
):
self.page -= 1
elif (
touch.ud['page'] == 'next' and
abs(touch.x - touch.ox) / self.width > self.swipe_threshold
):
self.page += 1
else:
self._trigger_layout()
touch.ungrab(self)
if len(self.children) > 1:
return self.children[-self.page + 1].on_touch_up(touch)
if __name__ == '__main__':
from kivy.base import runTouchApp
from kivy.uix.button import Button
pl = PageLayout()
for i in range(1, 4):
b = Button(text='page%s' % i)
pl.add_widget(b)
runTouchApp(pl)
|
rnixx/kivy
|
kivy/uix/pagelayout.py
|
Python
|
mit
| 7,321
|
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas import Series, Timestamp
from pandas.compat import range, lmap
import pandas.core.common as com
import pandas.util.testing as tm
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assert_raises_regex(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_get_callable_name():
from functools import partial
getname = com._get_callable_name
def fn(x):
return x
lambda_ = lambda x: x
part1 = partial(fn)
part2 = partial(part1)
class somecall(object):
def __call__(self):
return x # noqa
assert getname(fn) == 'fn'
assert getname(lambda_)
assert getname(part1) == 'fn'
assert getname(part2) == 'fn'
assert getname(somecall()) == 'somecall'
assert getname(1) is None
def test_any_none():
assert (com._any_none(1, 2, 3, None))
assert (not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert (com._all_not_none(1, 2, 3, 4))
assert (not com._all_not_none(1, 2, 3, None))
assert (not com._all_not_none(None, None, None, None))
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2), (2, 3), (3, 4)]
result = list(com.iterpairs(data))
assert (result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == len(mask)
# exhaustively test all possible mask sequences of length 8
ncols = 8
for i in range(2 ** ncols):
cols = lmap(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(len(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_map_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.map_indices_py(data)
assert (result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert ((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert ([4, 5, 6] == inter)
def test_intersection():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.intersection(a, b))
assert (a == inter)
def test_groupby():
values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']
expected = {'f': ['foo', 'foo3'],
'b': ['bar', 'baz', 'baz2'],
'q': ['qux']}
grouped = com.groupby(values, lambda x: x[0])
for k, v in grouped:
assert v == expected[k]
def test_random_state():
import numpy.random as npr
# Check with seed
state = com._random_state(5)
assert state.uniform() == npr.RandomState(5).uniform()
# Check with random state object
state2 = npr.RandomState(10)
assert (com._random_state(state2).uniform() ==
npr.RandomState(10).uniform())
# check with no arg random state
assert com._random_state() is np.random
# Error for floats or strings
with pytest.raises(ValueError):
com._random_state('test')
with pytest.raises(ValueError):
com._random_state(5.5)
def test_maybe_match_name():
matched = com._maybe_match_name(
Series([1], name='x'), Series(
[2], name='x'))
assert (matched == 'x')
matched = com._maybe_match_name(
Series([1], name='x'), Series(
[2], name='y'))
assert (matched is None)
matched = com._maybe_match_name(Series([1]), Series([2], name='x'))
assert (matched is None)
matched = com._maybe_match_name(Series([1], name='x'), Series([2]))
assert (matched is None)
matched = com._maybe_match_name(Series([1], name='x'), [2])
assert (matched == 'x')
matched = com._maybe_match_name([1], Series([2], name='y'))
assert (matched == 'y')
def test_dict_compat():
data_datetime64 = {np.datetime64('1990-03-15'): 1,
np.datetime64('2015-03-15'): 2}
data_unchanged = {1: 2, 3: 4, 5: 6}
expected = {Timestamp('1990-3-15'): 1, Timestamp('2015-03-15'): 2}
assert (com._dict_compat(data_datetime64) == expected)
assert (com._dict_compat(expected) == expected)
assert (com._dict_compat(data_unchanged) == data_unchanged)
|
lmallin/coverage_test
|
python_venv/lib/python2.7/site-packages/pandas/tests/test_common.py
|
Python
|
mit
| 4,870
|
import functools
import pfp.interp
def native(name, ret, interp=None, send_interp=False):
"""Used as a decorator to add the decorated function to the
pfp interpreter so that it can be used from within scripts.
:param str name: The name of the function as it will be exposed in template scripts.
:param pfp.fields.Field ret: The return type of the function (a class)
:param pfp.interp.PfpInterp interp: The specific interpreter to add the function to
:param bool send_interp: If the current interpreter should be passed to the function.
Examples:
The example below defines a ``Sum`` function that will return the sum of
all parameters passed to the function: ::
from pfp.fields import PYVAL
@native(name="Sum", ret=pfp.fields.Int64)
def sum_numbers(params, ctxt, scope, stream, coord):
res = 0
for param in params:
res += PYVAL(param)
return res
The code below is the code for the :any:`Int3 <pfp.native.dbg.int3>` function. Notice that it
requires that the interpreter be sent as a parameter: ::
@native(name="Int3", ret=pfp.fields.Void, send_interp=True)
def int3(params, ctxt, scope, stream, coord, interp):
if interp._no_debug:
return
if interp._int3:
interp.debugger = PfpDbg(interp)
interp.debugger.cmdloop()
"""
def native_decorator(func):
@functools.wraps(func)
def native_wrapper(*args, **kwargs):
return func(*args, **kwargs)
pfp.interp.PfpInterp.add_native(name, func, ret, interp=interp, send_interp=send_interp)
return native_wrapper
return native_decorator
def predefine(template):
pfp.interp.PfpInterp.add_predefine(template)
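# A usage sketch for predefine (the typedef and constant below are made-up
# examples, not part of pfp itself): register template source that the
# interpreter evaluates before the user's template runs.
#
#   predefine("""
#       typedef unsigned int UINT;
#       const int MAGIC = 0xCAFE;
#   """)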
|
greenoaktree/pfp
|
pfp/native/__init__.py
|
Python
|
mit
| 1,639
|
# ------------------------------------------------------------------------------------------------
# Copyright (c) 2016 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
# Tutorial sample #5: Observations
import MalmoPython
import os
import sys
import time
import json
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # flush print output immediately
def Menger(xorg, yorg, zorg, size, blocktype, variant, holetype):
#draw solid chunk
genstring = GenCuboidWithVariant(xorg,yorg,zorg,xorg+size-1,yorg+size-1,zorg+size-1,blocktype,variant) + "\n"
#now remove holes
unit = size
while (unit >= 3):
w=unit/3
for i in xrange(0, size, unit):
for j in xrange(0, size, unit):
x=xorg+i
y=yorg+j
genstring += GenCuboid(x+w,y+w,zorg,(x+2*w)-1,(y+2*w)-1,zorg+size-1,holetype) + "\n"
y=yorg+i
z=zorg+j
genstring += GenCuboid(xorg,y+w,z+w,xorg+size-1, (y+2*w)-1,(z+2*w)-1,holetype) + "\n"
genstring += GenCuboid(x+w,yorg,z+w,(x+2*w)-1,yorg+size-1,(z+2*w)-1,holetype) + "\n"
unit/=3
return genstring
def GenCuboid(x1, y1, z1, x2, y2, z2, blocktype):
return '<DrawCuboid x1="' + str(x1) + '" y1="' + str(y1) + '" z1="' + str(z1) + '" x2="' + str(x2) + '" y2="' + str(y2) + '" z2="' + str(z2) + '" type="' + blocktype + '"/>'
def GenCuboidWithVariant(x1, y1, z1, x2, y2, z2, blocktype, variant):
return '<DrawCuboid x1="' + str(x1) + '" y1="' + str(y1) + '" z1="' + str(z1) + '" x2="' + str(x2) + '" y2="' + str(y2) + '" z2="' + str(z2) + '" type="' + blocktype + '" variant="' + variant + '"/>'
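# For example (illustrative): GenCuboid(0, 5, 0, 2, 7, 2, "stone") returns the
# mission-XML element
#   <DrawCuboid x1="0" y1="5" z1="0" x2="2" y2="7" z2="2" type="stone"/>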
missionXML='''<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<About>
<Summary>Hello world!</Summary>
</About>
<ServerSection>
<ServerInitialConditions>
<Time>
<StartTime>1000</StartTime>
<AllowPassageOfTime>false</AllowPassageOfTime>
</Time>
<Weather>clear</Weather>
</ServerInitialConditions>
<ServerHandlers>
<FlatWorldGenerator generatorString="3;7,44*49,73,35:1,159:4,95:13,35:13,159:11,95:10,159:14,159:6,35:6,95:6;12;"/>
<DrawingDecorator>
<DrawSphere x="-27" y="70" z="0" radius="30" type="air"/>''' + Menger(-40, 40, -13, 27, "stone", "smooth_granite", "air") + '''
<DrawCuboid x1="-25" y1="39" z1="-2" x2="-29" y2="39" z2="2" type="lava"/>
<DrawCuboid x1="-26" y1="39" z1="-1" x2="-28" y2="39" z2="1" type="obsidian"/>
<DrawBlock x="-27" y="39" z="0" type="diamond_block"/>
</DrawingDecorator>
<ServerQuitFromTimeUp timeLimitMs="30000"/>
<ServerQuitWhenAnyAgentFinishes/>
</ServerHandlers>
</ServerSection>
<AgentSection mode="Survival">
<Name>MalmoTutorialBot</Name>
<AgentStart>
<Placement x="0.5" y="56.0" z="0.5" yaw="90"/>
<Inventory>
<InventoryItem slot="8" type="diamond_pickaxe"/>
</Inventory>
</AgentStart>
<AgentHandlers>
<ObservationFromFullStats/>
<ObservationFromGrid>
<Grid name="floor3x3">
<min x="-1" y="-1" z="-1"/>
<max x="1" y="-1" z="1"/>
</Grid>
</ObservationFromGrid>
<ContinuousMovementCommands turnSpeedDegs="180"/>
<InventoryCommands/>
<AgentQuitFromTouchingBlockType>
<Block type="diamond_block" />
</AgentQuitFromTouchingBlockType>
</AgentHandlers>
</AgentSection>
</Mission>'''
# Create default Malmo objects:
agent_host = MalmoPython.AgentHost()
try:
agent_host.parse( sys.argv )
except RuntimeError as e:
print 'ERROR:',e
print agent_host.getUsage()
exit(1)
if agent_host.receivedArgument("help"):
print agent_host.getUsage()
exit(0)
my_mission = MalmoPython.MissionSpec(missionXML, True)
my_mission_record = MalmoPython.MissionRecordSpec()
# Attempt to start a mission:
max_retries = 3
for retry in range(max_retries):
try:
agent_host.startMission( my_mission, my_mission_record )
break
except RuntimeError as e:
if retry == max_retries - 1:
print "Error starting mission:",e
exit(1)
else:
time.sleep(2)
# Loop until mission starts:
print "Waiting for the mission to start ",
world_state = agent_host.getWorldState()
while not world_state.is_mission_running:
sys.stdout.write(".")
time.sleep(0.1)
world_state = agent_host.getWorldState()
for error in world_state.errors:
print "Error:",error.text
print
print "Mission running ",
agent_host.sendCommand("hotbar.9 1") #Press the hotbar key
agent_host.sendCommand("hotbar.9 0") #Release hotbar key - agent should now be holding diamond_pickaxe
agent_host.sendCommand("pitch 0.2") #Start looking downward slowly
time.sleep(1) #Wait a second until we are looking in roughly the right direction
agent_host.sendCommand("pitch 0") #Stop tilting the camera
agent_host.sendCommand("move 1") #And start running...
agent_host.sendCommand("attack 1") #Whilst flailing our pickaxe!
jumping = False
# Loop until mission ends:
while world_state.is_mission_running:
sys.stdout.write(".")
time.sleep(0.1)
world_state = agent_host.getWorldState()
for error in world_state.errors:
print "Error:",error.text
if world_state.number_of_observations_since_last_state > 0:
msg = world_state.observations[-1].text
observations = json.loads(msg)
        grid = observations.get(u'floor3x3', 0)  # 3x3 grid of block names one level below the agent
        if jumping and grid[4]!=u'lava':  # grid[4] is the cell directly beneath the agent
            agent_host.sendCommand("jump 0")
            jumping = False
        if grid[3]==u'lava':  # grid[3] is the cell one block ahead of the agent
            agent_host.sendCommand("jump 1")
            jumping = True
print
print "Mission ended"
# Mission has ended.
|
murven/malmo
|
Malmo/samples/Python_examples/tutorial_5_solved.py
|
Python
|
mit
| 7,626
|
from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from . import views
urlpatterns = [
url(r"^$", csrf_exempt(views.SamlView.as_view())),
url(r"^metadata/$", views.serve_metadata),
]
|
FinnStutzenstein/OpenSlides
|
server/openslides/saml/urls.py
|
Python
|
mit
| 229
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
"""
Contains functions related to working with the Nectar downloading library.
"""
from functools import partial
from nectar.config import DownloaderConfig
from pulp.common.plugins import importer_constants as constants
def importer_config_to_nectar_config(importer_config):
"""
Translates the Pulp standard importer configuration into a DownloaderConfig instance.
:param importer_config: use the PluginCallConfiguration.flatten method to retrieve a
single dict view on the configuration
:type importer_config: dict
:rtype: nectar.config.DownloaderConfig
"""
# Mapping of importer config key to downloader config key
translations = (
(constants.KEY_SSL_CA_CERT, 'ssl_ca_cert'),
(constants.KEY_SSL_VALIDATION, 'ssl_validation'),
(constants.KEY_SSL_CLIENT_CERT, 'ssl_client_cert'),
(constants.KEY_SSL_CLIENT_KEY, 'ssl_client_key'),
(constants.KEY_PROXY_HOST, 'proxy_url'),
(constants.KEY_PROXY_PORT, 'proxy_port'),
(constants.KEY_PROXY_USER, 'proxy_username'),
(constants.KEY_PROXY_PASS, 'proxy_password'),
(constants.KEY_MAX_DOWNLOADS, 'max_concurrent'),
(constants.KEY_MAX_SPEED, 'max_speed'),
)
download_config_kwargs = {}
adder = partial(_safe_add_arg, importer_config, download_config_kwargs)
map(adder, translations)
download_config = DownloaderConfig(**download_config_kwargs)
return download_config
def _safe_add_arg(importer_config, dl_config, keys_tuple):
"""
Utility to only set values in the downloader config if they are present in the importer's
config.
:type importer_config: dict
:type dl_config: dict
:param keys_tuple: tuple of importer key to download config key
:type keys_tuple: (str, str)
"""
if keys_tuple[0] in importer_config:
dl_config[keys_tuple[1]] = importer_config[keys_tuple[0]]
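# A minimal usage sketch (the proxy host and limits below are illustrative
# values, not taken from a real Pulp deployment): flattened importer keys are
# renamed to nectar DownloaderConfig keyword arguments via the table above.
if __name__ == '__main__':
    example_importer_config = {
        constants.KEY_PROXY_HOST: 'http://proxy.example.com',
        constants.KEY_PROXY_PORT: 3128,
        constants.KEY_MAX_DOWNLOADS: 5,
    }
    config = importer_config_to_nectar_config(example_importer_config)
    # config is built from the proxy_url, proxy_port and max_concurrent keyword
    # arguments; keys absent from the importer config are simply left out.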
|
beav/pulp
|
server/pulp/plugins/util/nectar_config.py
|
Python
|
gpl-2.0
| 2,490
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for edit widgets.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '20/05/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis
import os
from qgis.core import QgsFeature, QgsGeometry, QgsPoint, QgsVectorLayer, NULL
from qgis.gui import QgsEditorWidgetRegistry
from PyQt4 import QtCore
from qgis.testing import (start_app,
unittest
)
from utilities import unitTestDataPath
start_app()
class TestQgsTextEditWidget(unittest.TestCase):
@classmethod
def setUpClass(cls):
QgsEditorWidgetRegistry.initEditors()
def createLayerWithOnePoint(self):
self.layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer",
"addfeat", "memory")
pr = self.layer.dataProvider()
f = QgsFeature()
f.setAttributes(["test", 123])
f.setGeometry(QgsGeometry.fromPoint(QgsPoint(100, 200)))
assert pr.addFeatures([f])
assert self.layer.pendingFeatureCount() == 1
return self.layer
def doAttributeTest(self, idx, expected):
reg = QgsEditorWidgetRegistry.instance()
configWdg = reg.createConfigWidget('TextEdit', self.layer, idx, None)
config = configWdg.config()
editwidget = reg.create('TextEdit', self.layer, idx, config, None, None)
editwidget.setValue('value')
assert editwidget.value() == expected[0]
editwidget.setValue(123)
assert editwidget.value() == expected[1]
editwidget.setValue(None)
assert editwidget.value() == expected[2]
editwidget.setValue(NULL)
assert editwidget.value() == expected[3]
def test_SetValue(self):
self.createLayerWithOnePoint()
self.doAttributeTest(0, ['value', '123', NULL, NULL])
self.doAttributeTest(1, [NULL, 123, NULL, NULL])
if __name__ == '__main__':
unittest.main()
|
NINAnor/QGIS
|
tests/src/python/test_qgseditwidgets.py
|
Python
|
gpl-2.0
| 2,315
|
################################################################################
#
# This program is part of the DellMon Zenpack for Zenoss.
# Copyright (C) 2008, 2009, 2010 Egor Puzanov.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__='''
Delete the previous DRAC and Storage Controllers reports.
$Id:$
'''
from Products.ZenModel.ZenPack import ZenPackMigration
from Products.ZenModel.migrate.Migrate import Version
class removeOldDellReports(ZenPackMigration):
version = Version(2, 3)
def migrate(self, pack):
if hasattr(pack.dmd.Reports, 'Device Reports'):
devReports = pack.dmd.Reports['Device Reports']
if hasattr(devReports, 'Dell DRAC Controllers'):
devReports._delObject('Dell DRAC Controllers')
if hasattr(devReports, 'Dell Storage Controllers'):
devReports._delObject('Dell Storage Controllers')
removeOldDellReports()
|
epuzanov/ZenPacks.community.DellMon
|
ZenPacks/community/DellMon/migrate/removeOldDellReports.py
|
Python
|
gpl-2.0
| 1,108
|
import unittest
from PyFoam.Applications.ConvertToCSV import ConvertToCSV
theSuite=unittest.TestSuite()
|
Unofficial-Extend-Project-Mirror/openfoam-extend-Breeder-other-scripting-PyFoam
|
unittests/Applications/test_ConvertToCSV.py
|
Python
|
gpl-2.0
| 106
|
# -*- coding: utf-8 -*-
#
# This file is part of HEPData.
# Copyright (C) 2016 CERN.
#
# HEPData is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# HEPData is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HEPData; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""HEPData application factories."""
import os
import sys
from invenio_base.app import create_app_factory
from invenio_base.wsgi import create_wsgi_factory
from invenio_config import create_config_loader
from . import config
env_prefix = 'APP'
conf_loader = create_config_loader(config=config, env_prefix=env_prefix)
instance_path = os.getenv(env_prefix + '_INSTANCE_PATH') or \
os.path.join(sys.prefix, 'var', 'hepdata-instance')
static_folder = os.getenv(env_prefix + '_STATIC_FOLDER') or \
os.path.join(instance_path, 'static')
create_api = create_app_factory(
'hepdata',
config_loader=conf_loader,
extension_entry_points=['invenio_base.api_apps'],
blueprint_entry_points=['invenio_base.api_blueprints'],
instance_path=instance_path,
)
create_app = create_app_factory(
'hepdata',
config_loader=conf_loader,
extension_entry_points=['invenio_base.apps'],
blueprint_entry_points=['invenio_base.blueprints'],
wsgi_factory=create_wsgi_factory({'/api': create_api}),
instance_path=instance_path,
static_folder=static_folder,
)
|
HEPData/hepdata3
|
hepdata/factory.py
|
Python
|
gpl-2.0
| 2,063
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio Demosite.
# Copyright (C) 2013 CERN.
#
# Invenio Demosite is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio Demosite is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from datetime import datetime
from fixture import DataSet
class OaiREPOSITORYData(DataSet):
class OaiREPOSITORY_2:
f1 = u'reportnumber'
f2 = u'division'
f3 = u''
setRecList = None
setDefinition = u'c=;p1=CERN;f1=reportnumber;m1=a;p2=(EP|PPE);f2=division;m2=r;p3=;f3=;m3=;'
last_updated = datetime.now()
id = 2
setSpec = u'cern:experiment'
setDescription = u''
p3 = u''
p1 = u'CERN'
setName = u'CERN experimental papers'
setCollection = u''
p2 = u'(EP|PPE)'
m1 = u'a'
m3 = u''
m2 = u'r'
class OaiREPOSITORY_3:
f1 = u'reportnumber'
f2 = u'division'
f3 = u''
setRecList = None
setDefinition = u'c=;p1=CERN;f1=reportnumber;m1=a;p2=TH;f2=division;m2=e;p3=;f3=;m3=;'
last_updated = datetime.now()
id = 3
setSpec = u'cern:theory'
setDescription = u''
p3 = u''
p1 = u'CERN'
setName = u'CERN theoretical papers'
setCollection = u''
p2 = u'TH'
m1 = u'a'
m3 = u''
m2 = u'e'
__all__ = ('OaiREPOSITORYData', )
|
hachreak/invenio-demosite
|
invenio_demosite/base/fixtures/oai_harvest.py
|
Python
|
gpl-2.0
| 1,982
|
#!/usr/bin/env python
# Support a YAML file hosts.yml as external inventory in Ansible
# Copyright (C) 2012 Jeroen Hoekx <jeroen@hoekx.be>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
File format:
- <hostname>
or
- host: <hostname>
vars:
- myvar: value
- myvbr: vblue
groups:
- mygroup1
- mygroup2
or
- group: <groupname>
vars:
- groupvar: value
hosts:
- myhost1
- myhost2
groups:
- subgroup1
- subgroup2
Any statement except the first definition is optional.
"""
import json
import os
import sys
from optparse import OptionParser
import yaml
class Host():
def __init__(self, name):
self.name = name
self.groups = []
self.vars = {}
def __repr__(self):
return "Host('%s')"%(self.name)
def set_variable(self, key, value):
self.vars[key] = value
def get_variables(self):
result = {}
for group in self.groups:
for k,v in group.get_variables().items():
result[k] = v
for k, v in self.vars.items():
result[k] = v
return result
def add_group(self, group):
if group not in self.groups:
self.groups.append(group)
class Group():
def __init__(self, name):
self.name = name
self.hosts = []
self.vars = {}
self.subgroups = []
self.parents = []
def __repr__(self):
return "Group('%s')"%(self.name)
def get_hosts(self):
""" List all hosts in this group, including subgroups """
result = [ host for host in self.hosts ]
for group in self.subgroups:
for host in group.get_hosts():
if host not in result:
result.append(host)
return result
def add_host(self, host):
if host not in self.hosts:
self.hosts.append(host)
host.add_group(self)
def add_subgroup(self, group):
if group not in self.subgroups:
self.subgroups.append(group)
group.add_parent(self)
def add_parent(self, group):
if group not in self.parents:
self.parents.append(group)
def set_variable(self, key, value):
self.vars[key] = value
def get_variables(self):
result = {}
for group in self.parents:
result.update( group.get_variables() )
result.update(self.vars)
return result
def find_group(name, groups):
for group in groups:
if name == group.name:
return group
def parse_vars(vars, obj):
### vars can be a list of dicts or a dictionary
if type(vars) == dict:
for k,v in vars.items():
obj.set_variable(k, v)
elif type(vars) == list:
for var in vars:
k,v = var.items()[0]
obj.set_variable(k, v)
def parse_yaml(yaml_hosts):
groups = []
all_hosts = Group('all')
ungrouped = Group('ungrouped')
groups.append(ungrouped)
### groups first, so hosts can be added to 'ungrouped' if necessary
subgroups = []
for entry in yaml_hosts:
if 'group' in entry and type(entry)==dict:
group = find_group(entry['group'], groups)
if not group:
group = Group(entry['group'])
groups.append(group)
if 'vars' in entry:
parse_vars(entry['vars'], group)
if 'hosts' in entry:
for host_name in entry['hosts']:
host = None
for test_host in all_hosts.get_hosts():
if test_host.name == host_name:
host = test_host
break
else:
host = Host(host_name)
all_hosts.add_host(host)
group.add_host(host)
if 'groups' in entry:
for subgroup in entry['groups']:
subgroups.append((group.name, subgroup))
for name, sub_name in subgroups:
group = find_group(name, groups)
subgroup = find_group(sub_name, groups)
group.add_subgroup(subgroup)
for entry in yaml_hosts:
### a host is either a dict or a single line definition
if type(entry) in [str, unicode]:
for test_host in all_hosts.get_hosts():
if test_host.name == entry:
break
else:
host = Host(entry)
all_hosts.add_host(host)
ungrouped.add_host(host)
elif 'host' in entry:
host = None
no_group = False
for test_host in all_hosts.get_hosts():
### all hosts contains only hosts already in groups
if test_host.name == entry['host']:
host = test_host
break
else:
host = Host(entry['host'])
all_hosts.add_host(host)
no_group = True
if 'vars' in entry:
parse_vars(entry['vars'], host)
if 'groups' in entry:
for test_group in groups:
if test_group.name in entry['groups']:
test_group.add_host(host)
all_hosts.add_host(host)
no_group = False
if no_group:
ungrouped.add_host(host)
return groups, all_hosts
parser = OptionParser()
parser.add_option('-l', '--list', default=False, dest="list_hosts", action="store_true")
parser.add_option('-H', '--host', default=None, dest="host")
parser.add_option('-e', '--extra-vars', default=None, dest="extra")
options, args = parser.parse_args()
base_dir = os.path.dirname(os.path.realpath(__file__))
hosts_file = os.path.join(base_dir, 'hosts.yml')
with open(hosts_file) as f:
yaml_hosts = yaml.safe_load( f.read() )
groups, all_hosts = parse_yaml(yaml_hosts)
if options.list_hosts == True:
result = {}
for group in groups:
result[group.name] = [host.name for host in group.get_hosts()]
print json.dumps(result)
sys.exit(0)
if options.host is not None:
result = {}
host = None
for test_host in all_hosts.get_hosts():
if test_host.name == options.host:
host = test_host
break
result = host.get_variables()
if options.extra:
k,v = options.extra.split("=")
result[k] = v
print json.dumps(result)
sys.exit(0)
parser.print_help()
sys.exit(1)
|
j2sol/ansible
|
plugins/inventory/yaml.py
|
Python
|
gpl-3.0
| 7,145
|
"""
This file is part of L3Morpho.
Author: Michael Gasser <gasser@cs.indiana.edu>
-----------------------------------------------------------------
internals.py is part of
Natural Language Toolkit: Internal utility functions
Copyright (C) 2001-2008 University of Pennsylvania
Author: Steven Bird <sb@csse.unimelb.edu.au>
Edward Loper <edloper@gradient.cis.upenn.edu>
URL: <http://www.nltk.org/>
License: <http://creativecommons.org/licenses/by-nc-nd/3.0/us/>
"""
import subprocess, os.path, re, warnings, textwrap
import types
######################################################################
# Regular Expression Processing
######################################################################
def convert_regexp_to_nongrouping(pattern):
"""
    Convert all grouping parentheses in the given regexp pattern to
    non-grouping parentheses, and return the result.  E.g.:
>>> convert_regexp_to_nongrouping('ab(c(x+)(z*))?d')
'ab(?:c(?:x+)(?:z*))?d'
@type pattern: C{str}
@rtype: C{str}
"""
# Sanity check: back-references are not allowed!
for s in re.findall(r'\\.|\(\?P=', pattern):
if s[1] in '0123456789' or s == '(?P=':
raise ValueError('Regular expressions with back-references '
'are not supported: %r' % pattern)
# This regexp substitution function replaces the string '('
# with the string '(?:', but otherwise makes no changes.
def subfunc(m):
return re.sub('^\((\?P<[^>]*>)?$', '(?:', m.group())
# Scan through the regular expression. If we see any backslashed
# characters, ignore them. If we see a named group, then
# replace it with "(?:". If we see any open parens that are part
# of an extension group, ignore those too. But if we see
    # any other open paren, replace it with "(?:".
return re.sub(r'''(?x)
\\. | # Backslashed character
\(\?P<[^>]*> | # Named group
\(\? | # Extension group
        \(                 # Grouping parenthesis''', subfunc, pattern)
##########################################################################
# Java Via Command-Line
##########################################################################
_java_bin = None
_java_options = []
def config_java(bin=None, options=None):
"""
Configure nltk's java interface, by letting nltk know where it can
find the C{java} binary, and what extra options (if any) should be
passed to java when it is run.
@param bin: The full path to the C{java} binary. If not specified,
then nltk will search the system for a C{java} binary; and if
one is not found, it will raise a C{LookupError} exception.
@type bin: C{string}
@param options: A list of options that should be passed to the
C{java} binary when it is called. A common value is
C{['-Xmx512m']}, which tells the C{java} binary to increase
the maximum heap size to 512 megabytes. If no options are
specified, then do not modify the options list.
@type options: C{list} of C{string}
"""
global _java_bin, _java_options
if bin is not None:
if not os.path.exists(bin):
raise ValueError('Could not find java binary at %r' % bin)
_java_bin = bin
if options is not None:
if isinstance(options, basestring):
options = options.split()
_java_options = list(options)
# Check the JAVAHOME environment variable.
for env_var in ['JAVAHOME', 'JAVA_HOME']:
if _java_bin is None and env_var in os.environ:
paths = [os.path.join(os.environ[env_var], 'java'),
os.path.join(os.environ[env_var], 'bin', 'java')]
for path in paths:
if os.path.exists(path):
_java_bin = path
print('[Found java: %s]' % path)
# If we're on a POSIX system, try using the 'which' command to
# find a java binary.
if _java_bin is None and os.name == 'posix':
try:
p = subprocess.Popen(['which', 'java'], stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
path = stdout.strip()
if path.endswith('java') and os.path.exists(path):
_java_bin = path
print('[Found java: %s]' % path)
except:
pass
if _java_bin is None:
raise LookupError('Unable to find java! Use config_java() '
'or set the JAVAHOME environment variable.')
def java(cmd, classpath=None, stdin=None, stdout=None, stderr=None):
"""
Execute the given java command, by opening a subprocess that calls
C{java}. If java has not yet been configured, it will be configured
by calling L{config_java()} with no arguments.
@param cmd: The java command that should be called, formatted as
a list of strings. Typically, the first string will be the name
of the java class; and the remaining strings will be arguments
for that java class.
@type cmd: C{list} of C{string}
@param classpath: A C{':'} separated list of directories, JAR
archives, and ZIP archives to search for class files.
@type classpath: C{string}
@param stdin, stdout, stderr: Specify the executed programs'
standard input, standard output and standard error file
handles, respectively. Valid values are C{subprocess.PIPE},
an existing file descriptor (a positive integer), an existing
file object, and C{None}. C{subprocess.PIPE} indicates that a
new pipe to the child should be created. With C{None}, no
redirection will occur; the child's file handles will be
inherited from the parent. Additionally, stderr can be
C{subprocess.STDOUT}, which indicates that the stderr data
from the applications should be captured into the same file
handle as for stdout.
@return: A tuple C{(stdout, stderr)}, containing the stdout and
stderr outputs generated by the java command if the C{stdout}
and C{stderr} parameters were set to C{subprocess.PIPE}; or
C{None} otherwise.
@raise OSError: If the java command returns a nonzero return code.
"""
if isinstance(cmd, basestring):
raise TypeError('cmd should be a list of strings')
# Make sure we know where a java binary is.
if _java_bin is None:
config_java()
# Construct the full command string.
cmd = list(cmd)
if classpath is not None:
cmd = ['-cp', classpath] + cmd
cmd = [_java_bin] + _java_options + cmd
# Call java via a subprocess
p = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr)
(stdout, stderr) = p.communicate()
# Check the return code.
if p.returncode != 0:
print(stderr)
raise OSError('Java command failed!')
return (stdout, stderr)
if 0:
#config_java(options='-Xmx512m')
# Write:
#java('weka.classifiers.bayes.NaiveBayes',
# ['-d', '/tmp/names.model', '-t', '/tmp/train.arff'],
# classpath='/Users/edloper/Desktop/weka/weka.jar')
# Read:
(a,b) = java(['weka.classifiers.bayes.NaiveBayes',
'-l', '/tmp/names.model', '-T', '/tmp/test.arff',
'-p', '0'],#, '-distribution'],
classpath='/Users/edloper/Desktop/weka/weka.jar')
######################################################################
# Parsing
######################################################################
class ParseError(ValueError):
"""
Exception raised by parse_* functions when they fail.
    @param position: The index in the input string where an error occurred.
    @param expected: What was expected when an error occurred.
"""
def __init__(self, expected, position):
ValueError.__init__(self, expected, position)
self.expected = expected
self.position = position
def __str__(self):
return 'Expected %s at %s' % (self.expected, self.position)
_STRING_START_RE = re.compile(r"[uU]?[rR]?(\"\"\"|\'\'\'|\"|\')")
def parse_str(s, start_position):
"""
If a Python string literal begins at the specified position in the
given string, then return a tuple C{(val, end_position)}
containing the value of the string literal and the position where
it ends. Otherwise, raise a L{ParseError}.
"""
# Read the open quote, and any modifiers.
m = _STRING_START_RE.match(s, start_position)
if not m: raise ParseError('open quote', start_position)
quotemark = m.group(1)
# Find the close quote.
_STRING_END_RE = re.compile(r'\\|%s' % quotemark)
position = m.end()
while True:
match = _STRING_END_RE.search(s, position)
if not match: raise ParseError('close quote', position)
if match.group(0) == '\\': position = match.end()+1
else: break
    # Parse it, using eval.  Strings with invalid escape sequences
    # might raise ValueError.
    try:
        return eval(s[start_position:match.end()]), match.end()
    except ValueError as e:
        raise ParseError('valid string (%s)' % e, start_position)
_PARSE_INT_RE = re.compile(r'-?\d+')
def parse_int(s, start_position):
"""
If an integer begins at the specified position in the given
string, then return a tuple C{(val, end_position)} containing the
value of the integer and the position where it ends. Otherwise,
raise a L{ParseError}.
"""
m = _PARSE_INT_RE.match(s, start_position)
if not m: raise ParseError('integer', start_position)
return int(m.group()), m.end()
_PARSE_NUMBER_VALUE = re.compile(r'-?(\d*)([.]?\d*)?')
def parse_number(s, start_position):
"""
If an integer or float begins at the specified position in the
given string, then return a tuple C{(val, end_position)}
containing the value of the number and the position where it ends.
Otherwise, raise a L{ParseError}.
"""
m = _PARSE_NUMBER_VALUE.match(s, start_position)
if not m or not (m.group(1) or m.group(2)):
raise ParseError('number', start_position)
if m.group(2): return float(m.group()), m.end()
else: return int(m.group()), m.end()
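# For example (illustrative): parse_number('x = -3.5;', 4) returns (-3.5, 8),
# while parse_number('abc', 0) raises ParseError('number', 0).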
######################################################################
# Check if a method has been overridden
######################################################################
def overridden(method):
"""
@return: True if C{method} overrides some method with the same
name in a base class. This is typically used when defining
abstract base classes or interfaces, to allow subclasses to define
either of two related methods:
>>> class EaterI:
... '''Subclass must define eat() or batch_eat().'''
... def eat(self, food):
... if overridden(self.batch_eat):
... return self.batch_eat([food])[0]
... else:
... raise NotImplementedError()
... def batch_eat(self, foods):
... return [self.eat(food) for food in foods]
@type method: instance method
"""
# [xx] breaks on classic classes!
if isinstance(method, types.MethodType) and method.im_class is not None:
name = method.__name__
funcs = [cls.__dict__[name]
for cls in _mro(method.im_class)
if name in cls.__dict__]
return len(funcs) > 1
else:
raise TypeError('Expected an instance method.')
def _mro(cls):
"""
Return the I{method resolution order} for C{cls} -- i.e., a list
containing C{cls} and all its base classes, in the order in which
they would be checked by C{getattr}. For new-style classes, this
is just cls.__mro__. For classic classes, this can be obtained by
a depth-first left-to-right traversal of C{__bases__}.
"""
if isinstance(cls, type):
return cls.__mro__
else:
mro = [cls]
for base in cls.__bases__: mro.extend(_mro(base))
return mro
######################################################################
# Deprecation decorator & base class
######################################################################
# [xx] dedent msg first if it comes from a docstring.
def _add_deprecated_field(obj, message):
"""Add a @deprecated field to a given object's docstring."""
indent = ''
# If we already have a docstring, then add a blank line to separate
# it from the new field, and check its indentation.
if obj.__doc__:
obj.__doc__ = obj.__doc__.rstrip()+'\n\n'
indents = re.findall(r'(?<=\n)[ ]+(?!\s)', obj.__doc__.expandtabs())
if indents: indent = min(indents)
# If we don't have a docstring, add an empty one.
else:
obj.__doc__ = ''
obj.__doc__ += textwrap.fill('@deprecated: %s' % message,
initial_indent=indent,
subsequent_indent=indent+' ')
def deprecated(message):
"""
A decorator used to mark functions as deprecated. This will cause
    a warning to be printed when the function is used.  Usage:
    >>> @deprecated('Use foo() instead')
    ... def bar(x):
    ...     print x/10
"""
def decorator(func):
msg = ("Function %s() has been deprecated. %s"
% (func.__name__, message))
msg = '\n' + textwrap.fill(msg, initial_indent=' ',
subsequent_indent=' ')
def newFunc(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
# Copy the old function's name, docstring, & dict
newFunc.__dict__.update(func.__dict__)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__deprecated__ = True
# Add a @deprecated field to the docstring.
_add_deprecated_field(newFunc, message)
return newFunc
return decorator
class Deprecated(object):
"""
A base class used to mark deprecated classes. A typical usage is to
alert users that the name of a class has changed:
>>> class OldClassName(Deprecated, NewClassName):
... "Use NewClassName instead."
The docstring of the deprecated class will be used in the
deprecation warning message.
"""
def __new__(cls, *args, **kwargs):
# Figure out which class is the deprecated one.
dep_cls = None
for base in _mro(cls):
if Deprecated in base.__bases__:
dep_cls = base; break
assert dep_cls, 'Unable to determine which base is deprecated.'
# Construct an appropriate warning.
        doc = (dep_cls.__doc__ or '').strip()
# If there's a @deprecated field, strip off the field marker.
doc = re.sub(r'\A\s*@deprecated:', r'', doc)
# Strip off any indentation.
doc = re.sub(r'(?m)^\s*', '', doc)
# Construct a 'name' string.
name = 'Class %s' % dep_cls.__name__
if cls != dep_cls:
name += ' (base class for %s)' % cls.__name__
# Put it all together.
msg = '%s has been deprecated. %s' % (name, doc)
# Wrap it.
msg = '\n' + textwrap.fill(msg, initial_indent=' ',
subsequent_indent=' ')
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
# Do the actual work of __new__.
return object.__new__(cls, *args, **kwargs)
##########################################################################
# COUNTER, FOR UNIQUE NAMING
##########################################################################
class Counter:
"""
A counter that auto-increments each time its value is read.
"""
def __init__(self, initial_value=0):
self._value = initial_value
def get(self):
self._value += 1
return self._value
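# For example (illustrative): each read auto-increments, so
#     c = Counter()
#     c.get()   # -> 1
#     c.get()   # -> 2
# which makes it handy for generating unique names.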
|
LowResourceLanguages/hltdi-l3
|
l3xdg/morphology/internals.py
|
Python
|
gpl-3.0
| 16,052
|
# -*- coding: utf-8 -*-
__author__ = 'hal9000'
__all__ = ['Cache', 'CacheServer']
import socket
import time
import json
import hashlib
from sqlite3 import dbapi2 as sqlite
import xbmc
import log
import gui
import system
SOCKET = '127.0.0.1', 59999
CLEAR = 60*60*24 # 1 day
class SQL:
def __init__(self, name, version):
self.fs = system.FS('cache')
if self.fs.exists('sandbox://' + name + '.sqlite'):
self.con = sqlite.connect(self.fs('sandbox://' + name + '.sqlite'))
else:
self.con = sqlite.connect(self.fs('sandbox://' + name + '.sqlite'))
self.sql_set('pragma auto_vacuum=1')
self.sql_set('create table meta(data text)')
self.sql_set('insert into meta(data) values(?)', (json.dumps({'version': version, 'timeout': int(time.time()) + CLEAR}),))
self.sql_set('create table cache(token varchar(32) unique, expire integer, data text)')
self.sql_set('create index dataindex on cache(expire)')
self.meta_load()
def health(self, version):
if self.meta['version'] != version:
self.meta_save('version', version)
self.clear()
elif self.meta['timeout'] < int(time.time()):
self.sql_set('delete from cache where expire<?', (int(time.time()), ))
self.meta_save('timeout', int(time.time()) + CLEAR)
def get(self, token):
return self.sql_get('select data from cache where token=? and expire>? limit 1', (hashlib.md5(str(token)).hexdigest(), int(time.time())))
def set(self, token, expire, data):
try:
jsdata = json.dumps(data)
except:
pass
else:
self.sql_set('replace into cache(token,expire,data) values(?,?,?)', (hashlib.md5(str(token)).hexdigest(), int(time.time()) + expire, jsdata))
def clear(self):
self.sql_set('delete from cache')
self.meta_save('timeout', int(time.time()) + CLEAR)
# Private
def sql_get(self, sql, *args):
cur = self.con.cursor()
cur.execute(sql, *args)
rows = cur.fetchall()
cur.close()
try:
return json.loads(rows[0][0])
except:
return None
def sql_set(self, sql, *args):
cur = self.con.cursor()
cur.execute(sql, *args)
self.con.commit()
cur.close()
def meta_load(self):
self.meta = self.sql_get('select data from meta')
if not self.meta:
self.meta = {'version': '', 'timeout': 0}
def meta_save(self, key, value):
self.meta[key] = value
self.sql_set('update meta set data=?', (json.dumps(self.meta),))
class Base:
def recv(self, sock):
data = ''
length = ''
idle = time.time()
while True:
try:
if isinstance(length, basestring):
c = sock.recv(1)
if c == '.':
length = int(length)
else:
length += c
else:
data = sock.recv(length - len(data))
except socket.error, e:
if not e.errno in (10035, 35):
                    self.log('Receive', repr(e))
if e.errno in (22,):
self.log('Socket error 22')
return None
if idle + 10 < time.time():
self.log('Timeout')
return None
else:
if not isinstance(length, basestring) and len(data) == length:
try:
return json.loads(data)
except Exception, e:
self.log('JSON', repr(e))
return None
def send(self, sock, data):
try:
jsdata = json.dumps(data)
except:
jsdata = 'null'
sock.send(str(len(jsdata)) + '.' + jsdata)
def log(self, *args):
log.error(str(self.__class__.__name__), *args)
class Cache(Base):
def __init__(self, name, version=None):
self.name = str(name).strip()
self.version = str(version).strip()
def call(self, token, fun, *args, **kwargs):
cache = self._call([1, token])
if cache is not None:
return cache
res = fun(*args, **kwargs)
if res is None:
return None
else:
if isinstance(res, tuple) and len(res) == 2 and isinstance(res[0], int):
self._call([2, token, res[0], res[1]])
return res[1]
else:
return res
def clear(self):
self._call('clear')
def _call(self, data):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(SOCKET)
except socket.error, e:
if e.errno in (111,):
self.log("CacheServer isn't running")
else:
self.log('Connect', repr(e))
return None
except:
return None
else:
self.send(sock, [self.name, self.version] + data)
r = self.recv(sock)
sock.close()
return r
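# A usage sketch (the cache name, token and fetch function below are
# illustrative, not part of this module): wrap an expensive call so that
# repeated invocations are answered by the SQLite-backed CacheServer.
#
#   cache = Cache('tmdb', version='1')
#
#   def fetch(movie_id):
#       data = expensive_http_request(movie_id)   # hypothetical helper
#       return 60 * 60, data                      # (expire in seconds, payload)
#
#   details = cache.call('movie:42', fetch, 42)   # cached for one hour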
class CacheServer(Base):
def __init__(self):
self.sql = {}
def run(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.bind(SOCKET)
except Exception, e:
self.log('Bind', repr(e))
gui.message('Failed to start CacheServer. Check log.')
else:
sock.listen(1)
sock.setblocking(0)
idle = time.time()
while not xbmc.abortRequested:
try:
(client, address) = sock.accept()
except socket.error, e:
if e.errno == 11 or e.errno == 10035 or e.errno == 35:
if idle + 3 < time.time():
time.sleep(0.5)
continue
self.log('Accept', repr(e))
continue
except:
continue
else:
self.send(client, self.command(self.recv(client)))
idle = time.time()
sock.close()
def command(self, data):
if not data or not isinstance(data, list) or len(data) < 3 or data[2] not in (1, 2, 3):
return None
sql = self.open(data[0], data[1])
if not sql:
return None
if data[2] == 1 and len(data) == 4 and isinstance(data[3], basestring):
return sql.get(data[3])
elif data[2] == 2 and len(data) == 6 and isinstance(data[3], basestring) and isinstance(data[4], int):
sql.set(data[3], data[4], data[5])
return 1
elif data[2] == 3:
sql.clear()
return 1
return None
def open(self, db, version):
name = str(db).strip()
if not name:
return None
ver = str(version).strip()
if db not in self.sql:
self.sql[db] = SQL(db, ver)
self.sql[db].health(ver)
return self.sql[db]
|
chimkentec/KodiMODo_rep
|
script.module.xbmcup/lib/xbmcup/cache.py
|
Python
|
gpl-3.0
| 7,268
|
import numpy as np
from gpaw import KohnShamConvergenceError
class SCFLoop:
"""Self-consistent field loop.
converged: Do we have a self-consistent solution?
"""
def __init__(self, eigenstates=0.1, energy=0.1, density=0.1, maxiter=100,
fixdensity=False, niter_fixdensity=None):
self.max_eigenstates_error = max(eigenstates, 1e-20)
self.max_energy_error = energy
self.max_density_error = max(density, 1e-20)
self.maxiter = maxiter
self.fixdensity = fixdensity
if niter_fixdensity is None:
niter_fixdensity = 2
self.niter_fixdensity = niter_fixdensity
if fixdensity:
self.fix_density()
self.reset()
def fix_density(self):
self.fixdensity = True
self.niter_fixdensity = 10000000
self.max_density_error = np.inf
def reset(self):
self.energies = []
self.eigenstates_error = None
self.energy_error = None
self.density_error = None
self.converged = False
def run(self, wfs, hamiltonian, density, occupations):
if self.converged:
return
for iter in range(1, self.maxiter + 1):
wfs.eigensolver.iterate(hamiltonian, wfs)
occupations.calculate(wfs)
# XXX ortho, dens, wfs?
energy = hamiltonian.get_energy(occupations)
self.energies.append(energy)
self.check_convergence(density, wfs.eigensolver)
yield iter
if self.converged:
break
if iter > self.niter_fixdensity:
density.update(wfs)
hamiltonian.update(density)
else:
hamiltonian.npoisson = 0
# Don't fix the density in the next step:
self.niter_fixdensity = 0
def check_convergence(self, density, eigensolver):
"""Check convergence of eigenstates, energy and density."""
if self.converged:
return True
self.eigenstates_error = eigensolver.error
if len(self.energies) < 3:
self.energy_error = self.max_energy_error
else:
self.energy_error = np.ptp(self.energies[-3:])
self.density_error = density.mixer.get_charge_sloshing()
if self.density_error is None:
self.density_error = 1000000.0
self.converged = (
self.eigenstates_error < self.max_eigenstates_error and
self.energy_error < self.max_energy_error and
self.density_error < self.max_density_error)
return self.converged
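# A driving-loop sketch (the wfs/hamiltonian/density/occupations objects are
# assumed to come from an already initialised GPAW calculator; the logging
# call is a placeholder):
#
#   scf = SCFLoop(eigenstates=1e-7, energy=1e-4, density=1e-4, maxiter=300)
#   for niter in scf.run(wfs, hamiltonian, density, occupations):
#       log_iteration(niter, scf.energies[-1])
#   if not scf.converged:
#       raise KohnShamConvergenceError('Did not converge!')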
|
qsnake/gpaw
|
gpaw/scf.py
|
Python
|
gpl-3.0
| 2,689
|
# Copyright (c) 2016 RIPE NCC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .base import Result, ParsingDict
class Response(ParsingDict):
def __init__(self, data, **kwargs):
ParsingDict.__init__(self, **kwargs)
self.raw_data = data
self.af = self.ensure("af", int)
self.body_size = self.ensure("bsize", int)
self.head_size = self.ensure("hsize", int)
self.destination_address = self.ensure("dst_addr", str)
self.source_address = self.ensure("src_addr", str)
self.code = self.ensure("res", int)
self.response_time = self.ensure("rt", float)
self.version = self.ensure("ver", str)
if not self.destination_address:
self.destination_address = self.ensure(
"addr", str, self.destination_address)
if not self.source_address:
self.source_address = self.ensure(
"srcaddr", str, self.source_address)
if not self.code:
self._handle_malformation("No response code available")
error = self.ensure("err", str)
if error:
self._handle_error(error)
class HttpResult(Result):
METHOD_GET = "GET"
METHOD_POST = "POST"
METHOD_PUT = "PUT"
METHOD_DELETE = "DELETE"
METHOD_HEAD = "HEAD"
METHODS = {
METHOD_GET: "GET",
METHOD_POST: "POST",
METHOD_PUT: "PUT",
METHOD_DELETE: "DELETE",
METHOD_HEAD: "HEAD"
}
def __init__(self, data, **kwargs):
Result.__init__(self, data, **kwargs)
self.uri = self.ensure("uri", str)
self.method = None
self.responses = []
if "result" not in self.raw_data:
self._handle_malformation("No result value found")
return
if isinstance(self.raw_data["result"], list):
# All modern results
for response in self.raw_data["result"]:
self.responses.append(Response(response, **kwargs))
if self.responses:
method = self.raw_data["result"][0].get(
"method",
self.raw_data["result"][0].get("mode") # Firmware == 4300
)
if method:
method = method.replace("4", "").replace("6", "")
if method in self.METHODS.keys():
self.method = self.METHODS[method]
else:
# Firmware <= 1
response = self.raw_data["result"].split(" ")
self.method = response[0].replace("4", "").replace("6", "")
self.responses.append(Response({
"dst_addr": response[1],
"rt": float(response[2]) * 1000,
"res": int(response[3]),
"hsize": int(response[4]),
"bsize": int(response[5]),
}))
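# A parsing sketch (the payload below is a hand-written minimal example, not
# real probe output):
#
#   result = HttpResult({
#       "uri": "http://example.com/",
#       "result": [{"af": 4, "res": 200, "rt": 12.3,
#                   "hsize": 150, "bsize": 1024, "method": "GET4"}],
#   })
#   result.method              # "GET" (the address-family suffix is stripped)
#   result.responses[0].code   # 200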
__all__ = (
    "HttpResult",
)
|
RIPE-NCC/ripe.atlas.sagan
|
ripe/atlas/sagan/http.py
|
Python
|
gpl-3.0
| 3,498
|
# sqlalchemy/ext/baked.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Baked query extension.
Provides a creational pattern for the :class:`.query.Query` object which
allows the fully constructed object, Core select statement, and string
compiled result to be fully cached.
"""
import copy
import logging
from .. import exc as sa_exc
from .. import util
from ..orm import exc as orm_exc
from ..orm import strategy_options
from ..orm.query import Query
from ..orm.session import Session
from ..sql import func
from ..sql import literal_column
from ..sql import util as sql_util
log = logging.getLogger(__name__)
class Bakery(object):
"""Callable which returns a :class:`.BakedQuery`.
This object is returned by the class method
:meth:`.BakedQuery.bakery`. It exists as an object
so that the "cache" can be easily inspected.
.. versionadded:: 1.2
"""
__slots__ = "cls", "cache"
def __init__(self, cls_, cache):
self.cls = cls_
self.cache = cache
def __call__(self, initial_fn, *args):
return self.cls(self.cache, initial_fn, args)
class BakedQuery(object):
"""A builder object for :class:`.query.Query` objects."""
__slots__ = "steps", "_bakery", "_cache_key", "_spoiled"
def __init__(self, bakery, initial_fn, args=()):
self._cache_key = ()
self._update_cache_key(initial_fn, args)
self.steps = [initial_fn]
self._spoiled = False
self._bakery = bakery
@classmethod
def bakery(cls, size=200, _size_alert=None):
"""Construct a new bakery.
:return: an instance of :class:`.Bakery`
"""
return Bakery(cls, util.LRUCache(size, size_alert=_size_alert))
def _clone(self):
b1 = BakedQuery.__new__(BakedQuery)
b1._cache_key = self._cache_key
b1.steps = list(self.steps)
b1._bakery = self._bakery
b1._spoiled = self._spoiled
return b1
def _update_cache_key(self, fn, args=()):
self._cache_key += (fn.__code__,) + args
def __iadd__(self, other):
if isinstance(other, tuple):
self.add_criteria(*other)
else:
self.add_criteria(other)
return self
def __add__(self, other):
if isinstance(other, tuple):
return self.with_criteria(*other)
else:
return self.with_criteria(other)
def add_criteria(self, fn, *args):
"""Add a criteria function to this :class:`.BakedQuery`.
This is equivalent to using the ``+=`` operator to
modify a :class:`.BakedQuery` in-place.
"""
self._update_cache_key(fn, args)
self.steps.append(fn)
return self
def with_criteria(self, fn, *args):
"""Add a criteria function to a :class:`.BakedQuery` cloned from this one.
This is equivalent to using the ``+`` operator to
produce a new :class:`.BakedQuery` with modifications.
"""
return self._clone().add_criteria(fn, *args)
def for_session(self, session):
"""Return a :class:`.Result` object for this :class:`.BakedQuery`.
This is equivalent to calling the :class:`.BakedQuery` as a
Python callable, e.g. ``result = my_baked_query(session)``.
"""
return Result(self, session)
def __call__(self, session):
return self.for_session(session)
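    # A usage sketch of the pattern (``User`` and ``bindparam`` belong to
    # application code and SQLAlchemy Core respectively; they are placeholders
    # here, not part of this module):
    #
    #   bakery = BakedQuery.bakery()
    #
    #   def lookup(session, username):
    #       baked_query = bakery(lambda s: s.query(User))
    #       baked_query += lambda q: q.filter(User.name == bindparam('name'))
    #       return baked_query(session).params(name=username).all()
    #
    # Each lambda runs only the first time; afterwards the constructed Query
    # and its compiled SQL come from the bakery's LRU cache.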
def spoil(self, full=False):
"""Cancel any query caching that will occur on this BakedQuery object.
The BakedQuery can continue to be used normally, however additional
creational functions will not be cached; they will be called
on every invocation.
This is to support the case where a particular step in constructing
a baked query disqualifies the query from being cacheable, such
as a variant that relies upon some uncacheable value.
:param full: if False, only functions added to this
:class:`.BakedQuery` object subsequent to the spoil step will be
non-cached; the state of the :class:`.BakedQuery` up until
this point will be pulled from the cache. If True, then the
entire :class:`_query.Query` object is built from scratch each
time, with all creational functions being called on each
invocation.
"""
if not full and not self._spoiled:
_spoil_point = self._clone()
_spoil_point._cache_key += ("_query_only",)
self.steps = [_spoil_point._retrieve_baked_query]
self._spoiled = True
return self
def _effective_key(self, session):
"""Return the key that actually goes into the cache dictionary for
this :class:`.BakedQuery`, taking into account the given
:class:`.Session`.
This basically means we also will include the session's query_class,
as the actual :class:`_query.Query` object is part of what's cached
and needs to match the type of :class:`_query.Query` that a later
session will want to use.
"""
return self._cache_key + (session._query_cls,)
def _with_lazyload_options(self, options, effective_path, cache_path=None):
"""Cloning version of _add_lazyload_options.
"""
q = self._clone()
q._add_lazyload_options(options, effective_path, cache_path=cache_path)
return q
def _add_lazyload_options(self, options, effective_path, cache_path=None):
"""Used by per-state lazy loaders to add options to the
"lazy load" query from a parent query.
Creates a cache key based on given load path and query options;
if a repeatable cache key cannot be generated, the query is
"spoiled" so that it won't use caching.
"""
key = ()
if not cache_path:
cache_path = effective_path
if cache_path.path[0].is_aliased_class:
# paths that are against an AliasedClass are unsafe to cache
# with since the AliasedClass is an ad-hoc object.
self.spoil(full=True)
else:
for opt in options:
cache_key = opt._generate_cache_key(cache_path)
if cache_key is False:
self.spoil(full=True)
elif cache_key is not None:
key += cache_key
self.add_criteria(
lambda q: q._with_current_path(
effective_path
)._conditional_options(*options),
cache_path.path,
key,
)
def _retrieve_baked_query(self, session):
query = self._bakery.get(self._effective_key(session), None)
if query is None:
query = self._as_query(session)
self._bakery[self._effective_key(session)] = query.with_session(
None
)
return query.with_session(session)
def _bake(self, session):
query = self._as_query(session)
context = query._compile_context()
self._bake_subquery_loaders(session, context)
context.session = None
context.query = query = context.query.with_session(None)
query._execution_options = query._execution_options.union(
{"compiled_cache": self._bakery}
)
# we'll be holding onto the query for some of its state,
# so delete some compilation-use-only attributes that can take up
# space
for attr in (
"_correlate",
"_from_obj",
"_mapper_adapter_map",
"_joinpath",
"_joinpoint",
):
query.__dict__.pop(attr, None)
# if the query is not safe to cache, we still do everything as though
# we did cache it, since the receiver of _bake() assumes subqueryload
# context was set up, etc.
if context.query._bake_ok:
self._bakery[self._effective_key(session)] = context
return context
def to_query(self, query_or_session):
"""Return the :class:`_query.Query` object for use as a subquery.
This method should be used within the lambda callable being used
to generate a step of an enclosing :class:`.BakedQuery`. The
parameter should normally be the :class:`_query.Query` object that
is passed to the lambda::
sub_bq = self.bakery(lambda s: s.query(User.name))
sub_bq += lambda q: q.filter(
User.id == Address.user_id).correlate(Address)
main_bq = self.bakery(lambda s: s.query(Address))
main_bq += lambda q: q.filter(
sub_bq.to_query(q).exists())
In the case where the subquery is used in the first callable against
a :class:`.Session`, the :class:`.Session` is also accepted::
sub_bq = self.bakery(lambda s: s.query(User.name))
sub_bq += lambda q: q.filter(
User.id == Address.user_id).correlate(Address)
main_bq = self.bakery(
lambda s: s.query(Address.id, sub_bq.to_query(q).as_scalar())
)
:param query_or_session: a :class:`_query.Query` object or a class
:class:`.Session` object, that is assumed to be within the context
of an enclosing :class:`.BakedQuery` callable.
.. versionadded:: 1.3
"""
if isinstance(query_or_session, Session):
session = query_or_session
elif isinstance(query_or_session, Query):
session = query_or_session.session
if session is None:
raise sa_exc.ArgumentError(
"Given Query needs to be associated with a Session"
)
else:
raise TypeError(
"Query or Session object expected, got %r."
% type(query_or_session)
)
return self._as_query(session)
def _as_query(self, session):
query = self.steps[0](session)
for step in self.steps[1:]:
query = step(query)
return query
def _bake_subquery_loaders(self, session, context):
"""convert subquery eager loaders in the cache into baked queries.
For subquery eager loading to work, all we need here is that the
Query point to the correct session when it is run. However, since
we are "baking" anyway, we may as well also turn the query into
a "baked" query so that we save on performance too.
"""
context.attributes["baked_queries"] = baked_queries = []
for k, v in list(context.attributes.items()):
if isinstance(v, Query):
if "subquery" in k:
bk = BakedQuery(self._bakery, lambda *args: v)
bk._cache_key = self._cache_key + k
bk._bake(session)
baked_queries.append((k, bk._cache_key, v))
del context.attributes[k]
def _unbake_subquery_loaders(
self, session, context, params, post_criteria
):
"""Retrieve subquery eager loaders stored by _bake_subquery_loaders
and turn them back into Result objects that will iterate just
like a Query object.
"""
if "baked_queries" not in context.attributes:
return
for k, cache_key, query in context.attributes["baked_queries"]:
bk = BakedQuery(
self._bakery, lambda sess, q=query: q.with_session(sess)
)
bk._cache_key = cache_key
q = bk.for_session(session)
for fn in post_criteria:
q = q.with_post_criteria(fn)
context.attributes[k] = q.params(**params)
class Result(object):
"""Invokes a :class:`.BakedQuery` against a :class:`.Session`.
The :class:`.Result` object is where the actual :class:`.query.Query`
object gets created, or retrieved from the cache,
against a target :class:`.Session`, and is then invoked for results.
"""
__slots__ = "bq", "session", "_params", "_post_criteria"
def __init__(self, bq, session):
self.bq = bq
self.session = session
self._params = {}
self._post_criteria = []
def params(self, *args, **kw):
"""Specify parameters to be replaced into the string SQL statement."""
if len(args) == 1:
kw.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary."
)
self._params.update(kw)
return self
def _using_post_criteria(self, fns):
if fns:
self._post_criteria.extend(fns)
return self
def with_post_criteria(self, fn):
"""Add a criteria function that will be applied post-cache.
This adds a function that will be run against the
:class:`_query.Query` object after it is retrieved from the
cache. Functions here can be used to alter the query in ways
that **do not affect the SQL output**, such as execution options
and shard identifiers (when using a shard-enabled query object)
.. warning:: :meth:`.Result.with_post_criteria` functions are applied
to the :class:`_query.Query`
object **after** the query's SQL statement
object has been retrieved from the cache. Any operations here
which intend to modify the SQL should ensure that
:meth:`.BakedQuery.spoil` was called first.
.. versionadded:: 1.2
"""
return self._using_post_criteria([fn])
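    # A minimal usage sketch (illustrative only; ``baked_query`` and ``session``
    # are assumed to exist in caller code). Post-criteria functions run against
    # the Query after it comes out of the cache, so they should only apply
    # changes that leave the SQL unchanged, e.g. execution options:
    #
    #   result = baked_query(session).with_post_criteria(
    #       lambda q: q.execution_options(stream_results=True)
    #   )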
def _as_query(self):
q = self.bq._as_query(self.session).params(self._params)
for fn in self._post_criteria:
q = fn(q)
return q
def __str__(self):
return str(self._as_query())
def __iter__(self):
bq = self.bq
if not self.session.enable_baked_queries or bq._spoiled:
return iter(self._as_query())
baked_context = bq._bakery.get(bq._effective_key(self.session), None)
if baked_context is None:
baked_context = bq._bake(self.session)
context = copy.copy(baked_context)
context.session = self.session
context.attributes = context.attributes.copy()
bq._unbake_subquery_loaders(
self.session, context, self._params, self._post_criteria
)
context.statement.use_labels = True
if context.autoflush and not context.populate_existing:
self.session._autoflush()
q = context.query.params(self._params).with_session(self.session)
for fn in self._post_criteria:
q = fn(q)
return q._execute_and_instances(context)
def count(self):
"""return the 'count'.
Equivalent to :meth:`_query.Query.count`.
Note this uses a subquery to ensure an accurate count regardless
of the structure of the original statement.
.. versionadded:: 1.1.6
"""
col = func.count(literal_column("*"))
bq = self.bq.with_criteria(lambda q: q.from_self(col))
return bq.for_session(self.session).params(self._params).scalar()
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
Equivalent to :meth:`_query.Query.scalar`.
.. versionadded:: 1.1.6
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def first(self):
"""Return the first row.
Equivalent to :meth:`_query.Query.first`.
"""
bq = self.bq.with_criteria(lambda q: q.slice(0, 1))
ret = list(
bq.for_session(self.session)
.params(self._params)
._using_post_criteria(self._post_criteria)
)
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Equivalent to :meth:`_query.Query.one`.
"""
try:
ret = self.one_or_none()
except orm_exc.MultipleResultsFound as err:
util.raise_(
orm_exc.MultipleResultsFound(
"Multiple rows were found for one()"
),
replace_context=err,
)
else:
if ret is None:
raise orm_exc.NoResultFound("No row was found for one()")
return ret
def one_or_none(self):
"""Return one or zero results, or raise an exception for multiple
rows.
Equivalent to :meth:`_query.Query.one_or_none`.
.. versionadded:: 1.0.9
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
return None
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one_or_none()"
)
def all(self):
"""Return all rows.
Equivalent to :meth:`_query.Query.all`.
"""
return list(self)
def get(self, ident):
"""Retrieve an object based on identity.
Equivalent to :meth:`_query.Query.get`.
"""
query = self.bq.steps[0](self.session)
return query._get_impl(ident, self._load_on_pk_identity)
def _load_on_pk_identity(self, query, primary_key_identity):
"""Load the given primary key identity from the database."""
mapper = query._mapper_zero()
_get_clause, _get_params = mapper._get_clause
def setup(query):
_lcl_get_clause = _get_clause
q = query._clone()
q._get_condition()
q._order_by = None
# None present in ident - turn those comparisons
# into "IS NULL"
if None in primary_key_identity:
nones = set(
[
_get_params[col].key
for col, value in zip(
mapper.primary_key, primary_key_identity
)
if value is None
]
)
_lcl_get_clause = sql_util.adapt_criterion_to_null(
_lcl_get_clause, nones
)
_lcl_get_clause = q._adapt_clause(_lcl_get_clause, True, False)
q._criterion = _lcl_get_clause
for fn in self._post_criteria:
q = fn(q)
return q
# cache the query against a key that includes
# which positions in the primary key are NULL
# (remember, we can map to an OUTER JOIN)
bq = self.bq
# add the clause we got from mapper._get_clause to the cache
# key so that if a race causes multiple calls to _get_clause,
# we've cached on ours
bq = bq._clone()
bq._cache_key += (_get_clause,)
bq = bq.with_criteria(
setup, tuple(elem is None for elem in primary_key_identity)
)
params = dict(
[
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(
primary_key_identity, mapper.primary_key
)
]
)
result = list(bq.for_session(self.session).params(**params))
l = len(result)
if l > 1:
raise orm_exc.MultipleResultsFound()
elif l:
return result[0]
else:
return None
@util.deprecated(
"1.2", "Baked lazy loading is now the default implementation."
)
def bake_lazy_loaders():
"""Enable the use of baked queries for all lazyloaders systemwide.
The "baked" implementation of lazy loading is now the sole implementation
for the base lazy loader; this method has no effect except for a warning.
"""
pass
@util.deprecated(
"1.2", "Baked lazy loading is now the default implementation."
)
def unbake_lazy_loaders():
"""Disable the use of baked queries for all lazyloaders systemwide.
This method now raises NotImplementedError() as the "baked" implementation
is the only lazy load implementation. The
:paramref:`_orm.relationship.bake_queries` flag may be used to disable
the caching of queries on a per-relationship basis.
"""
raise NotImplementedError(
"Baked lazy loading is now the default implementation"
)
@strategy_options.loader_option()
def baked_lazyload(loadopt, attr):
"""Indicate that the given attribute should be loaded using "lazy"
loading with a "baked" query used in the load.
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "baked_select"})
@baked_lazyload._add_unbound_fn
@util.deprecated(
"1.2",
"Baked lazy loading is now the default "
"implementation for lazy loading.",
)
def baked_lazyload(*keys):
return strategy_options._UnboundLoad._from_keys(
strategy_options._UnboundLoad.baked_lazyload, keys, False, {}
)
@baked_lazyload._add_unbound_all_fn
@util.deprecated(
"1.2",
"Baked lazy loading is now the default "
"implementation for lazy loading.",
)
def baked_lazyload_all(*keys):
return strategy_options._UnboundLoad._from_keys(
strategy_options._UnboundLoad.baked_lazyload, keys, True, {}
)
baked_lazyload = baked_lazyload._unbound_fn
baked_lazyload_all = baked_lazyload_all._unbound_all_fn
bakery = BakedQuery.bakery
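# A minimal end-to-end sketch of the extension above (assumes a mapped ``User``
# class and an existing ``Session``; illustrative only, not part of this module):
#
#   from sqlalchemy import bindparam
#
#   bakery = BakedQuery.bakery()
#
#   def lookup_user(session, username):
#       bq = bakery(lambda s: s.query(User))
#       bq += lambda q: q.filter(User.name == bindparam("username"))
#       return bq(session).params(username=username).all()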
|
Vagab0nd/SiCKRAGE
|
lib3/sqlalchemy/ext/baked.py
|
Python
|
gpl-3.0
| 21,989
|
#!/usr/bin/env python
import math
import re
EVERYTHING_BEFORE_PARAMS = """id: qtgui_time_sink_x
label: QT GUI Time Sink
parameters:
- id: type
label: Type
dtype: enum
default: complex
options: [complex, float, msg_complex, msg_float]
option_labels: [Complex, Float, Complex Message, Float Message]
option_attributes:
fcn: [time_sink_c, time_sink_f, time_sink_c, time_sink_f]
t: [complex, float, message, message]
hide: part
- id: name
label: Name
dtype: string
default: '""'
hide: ${ ('none' if len(name) > 0 else 'part') }
- id: ylabel
label: Y Axis Label
dtype: string
default: Amplitude
hide: part
- id: yunit
label: Y Axis Unit
dtype: string
default: '""'
hide: part
- id: size
label: Number of Points
dtype: int
default: '1024'
hide: ${ ('all' if type.startswith('msg') else 'none') }
- id: srate
label: Sample Rate
dtype: float
default: samp_rate
- id: grid
label: Grid
dtype: enum
default: 'False'
options: ['True', 'False']
option_labels: ['Yes', 'No']
hide: part
- id: autoscale
label: Autoscale
dtype: enum
default: 'False'
options: ['True', 'False']
option_labels: ['Yes', 'No']
- id: ymin
label: Y min
dtype: float
default: '-1'
hide: part
- id: ymax
label: Y max
dtype: float
default: '1'
hide: part
- id: nconnections
label: Number of Inputs
dtype: int
default: '1'
hide: ${ ('all' if type.startswith('msg') else 'part') }
- id: update_time
label: Update Period
dtype: float
default: '0.10'
hide: part
- id: entags
label: Disp. Tags
dtype: enum
default: 'True'
options: ['True', 'False']
option_labels: ['Yes', 'No']
hide: ${ ('all' if type.startswith('msg') else 'part') }
- id: gui_hint
label: GUI Hint
dtype: gui_hint
hide: part
- id: tr_mode
label: Trigger Mode
category: Trigger
dtype: enum
default: qtgui.TRIG_MODE_FREE
options: [qtgui.TRIG_MODE_FREE, qtgui.TRIG_MODE_AUTO, qtgui.TRIG_MODE_NORM, qtgui.TRIG_MODE_TAG]
option_labels: [Free, Auto, Normal, Tag]
hide: part
- id: tr_slope
label: Trigger Slope
category: Trigger
dtype: enum
default: qtgui.TRIG_SLOPE_POS
options: [qtgui.TRIG_SLOPE_POS, qtgui.TRIG_SLOPE_NEG]
option_labels: [Positive, Negative]
hide: part
- id: tr_level
label: Trigger Level
category: Trigger
dtype: float
default: '0.0'
hide: part
- id: tr_delay
label: Trigger Delay
category: Trigger
dtype: float
default: '0'
hide: part
- id: tr_chan
label: Trigger Channel
category: Trigger
dtype: int
default: '0'
hide: part
- id: tr_tag
label: Trigger Tag Key
category: Trigger
dtype: string
default: '""'
hide: part
- id: ctrlpanel
label: Control Panel
category: Config
dtype: enum
default: 'False'
options: ['True', 'False']
option_labels: ['Yes', 'No']
hide: part
- id: legend
label: Legend
category: Config
dtype: enum
default: 'True'
options: ['True', 'False']
option_labels: ['Yes', 'No']
hide: part
- id: axislabels
label: Axis Labels
category: Config
dtype: enum
default: 'True'
options: ['True', 'False']
option_labels: ['Yes', 'No']
hide: part
- id: stemplot
label: Stem Plot
category: Config
dtype: enum
default: 'False'
options: ['True', 'False']
option_labels: ['Yes', 'No']
hide: part"""
LINE_PARAMS = """
- id: label{i}
label: Line {i} Label
dtype: string
default: 'Signal {i}'
base_key: label1
hide: ${{ ('part' if (
int(nconnections) >= {i}
or (type == "complex" and int(nconnections) >= {i_cplx})
or (type == "msg_complex" and {i_cplx} <= 1)
or (type == "msg_float" and {i} <= 1))
else 'all')
}}
category: Config
- id: width{i}
label: Line {i} Width
default: 1
base_key: width1
hide: ${{ ('part' if (
int(nconnections) >= {i}
or (type == "complex" and int(nconnections) >= {i_cplx})
or (type == "msg_complex" and {i_cplx} <= 1)
or (type == "msg_float" and {i} <= 1))
else 'all')
}}
category: Config
- id: color{i}
label: Line {i} Color
dtype: enum
options: ['blue', 'red', 'green', 'black', 'cyan', 'magenta', 'yellow', 'dark red', 'dark green', 'dark blue']
option_labels: ['Blue', 'Red', 'Green', 'Black', 'Cyan', 'Magenta', 'Yellow', 'Dark Red', 'Dark Green', 'Dark Blue']
default: '{i_color}'
base_key: color1
hide: ${{ ('part' if (
int(nconnections) >= {i}
or (type == "complex" and int(nconnections) >= {i_cplx})
or (type == "msg_complex" and {i_cplx} <= 1)
or (type == "msg_float" and {i} <= 1))
else 'all')
}}
category: Config
- id: style{i}
label: Line {i} Style
dtype: enum
options: ['1','2','3','4','5','0']
option_labels: ['Solid','Dash','Dots','Dash-Dot','Dash-Dot-Dot','None']
default: 1
base_key: style1
hide: ${{ ('part' if (
int(nconnections) >= {i}
or (type == "complex" and int(nconnections) >= {i_cplx})
or (type == "msg_complex" and {i_cplx} <= 1)
or (type == "msg_float" and {i} <= 1))
else 'all')
}}
category: Config
- id: marker{i}
label: Line {i} Marker
dtype: enum
options: ['-1','0','1','2','3','4','5','6','7','8','9']
option_labels: ['None','Circle','Rectangle','Diamond','Triangle','Down Triangle','Up Triangle','Left Triangle','Right Triangle','Cross','X-Cross']
default: -1
base_key: marker1
hide: ${{ ('part' if (
int(nconnections) >= {i}
or (type == "complex" and int(nconnections) >= {i_cplx})
or (type == "msg_complex" and {i_cplx} <= 1)
or (type == "msg_float" and {i} <= 1))
else 'all')
}}
category: Config
- id: alpha{i}
label: Line {i} Alpha
dtype: real
default: 1.0
base_key: alpha1
hide: ${{ ('part' if (
int(nconnections) >= {i}
or (type == "complex" and int(nconnections) >= {i_cplx})
or (type == "msg_complex" and {i_cplx} <= 1)
or (type == "msg_float" and {i} <= 1))
else 'all')
}}
category: Config
"""
EVERYTHING_AFTER_PARAMS = """
asserts:
- ${nconnections <= (5 if type == 'complex' else 10)}
inputs:
- domain: stream
dtype: ${ type.t }
multiplicity: ${ (0 if type.startswith('msg') else nconnections) }
optional: ${ (True if type.startswith('msg') else False) }
templates:
imports: |-
from PyQt5 import Qt
from gnuradio import qtgui
from gnuradio.filter import firdes
import sip
callbacks:
- set_time_domain_axis(${min}, ${max})
- set_update_time(${update_time})
- set_y_axis(${ymin}, ${ymax})
- set_samp_rate(${srate})
- self.${id}.set_trigger_mode(${tr_mode}, ${tr_slope}, ${tr_level}, ${tr_delay},
${tr_chan}, ${tr_tag})
make: |-
<%
win = 'self._%s_win'%id
%>\\
qtgui.${type.fcn}(
${size}, #size
${srate}, #samp_rate
${name}, #name
${0 if type.startswith('msg') else nconnections}, #number of inputs
None # parent
)
self.${id}.set_update_time(${update_time})
self.${id}.set_y_axis(${ymin}, ${ymax})
self.${id}.set_y_label(${ylabel}, ${yunit})
self.${id}.enable_tags(${entags})
self.${id}.set_trigger_mode(${tr_mode}, ${tr_slope}, ${tr_level}, ${tr_delay}, ${tr_chan}, ${tr_tag})
self.${id}.enable_autoscale(${autoscale})
self.${id}.enable_grid(${grid})
self.${id}.enable_axis_labels(${axislabels})
self.${id}.enable_control_panel(${ctrlpanel})
self.${id}.enable_stem_plot(${stemplot})
% if legend == "False":
self.${id}.disable_legend()
% endif
labels = [${label1}, ${label2}, ${label3}, ${label4}, ${label5},
${label6}, ${label7}, ${label8}, ${label9}, ${label10}]
widths = [${width1}, ${width2}, ${width3}, ${width4}, ${width5},
${width6}, ${width7}, ${width8}, ${width9}, ${width10}]
colors = ['${color1}', '${color2}', '${color3}', '${color4}', '${color5}',
'${color6}', '${color7}', '${color8}', '${color9}', '${color10}']
alphas = [${alpha1}, ${alpha2}, ${alpha3}, ${alpha4}, ${alpha5},
${alpha6}, ${alpha7}, ${alpha8}, ${alpha9}, ${alpha10}]
styles = [${style1}, ${style2}, ${style3}, ${style4}, ${style5},
${style6}, ${style7}, ${style8}, ${style9}, ${style10}]
markers = [${marker1}, ${marker2}, ${marker3}, ${marker4}, ${marker5},
${marker6}, ${marker7}, ${marker8}, ${marker9}, ${marker10}]
% if type.endswith('complex'):
for i in range(${2 if type.startswith('msg') else 2*int(nconnections)}):
if len(labels[i]) == 0:
if (i % 2 == 0):
self.${id}.set_line_label(i, "Re{{Data {0}}}".format(i/2))
else:
self.${id}.set_line_label(i, "Im{{Data {0}}}".format(i/2))
else:
self.${id}.set_line_label(i, labels[i])
self.${id}.set_line_width(i, widths[i])
self.${id}.set_line_color(i, colors[i])
self.${id}.set_line_style(i, styles[i])
self.${id}.set_line_marker(i, markers[i])
self.${id}.set_line_alpha(i, alphas[i])
% else:
for i in range(${1 if type.startswith('msg') else int(nconnections)}):
if len(labels[i]) == 0:
self.${id}.set_line_label(i, "Data {0}".format(i))
else:
self.${id}.set_line_label(i, labels[i])
self.${id}.set_line_width(i, widths[i])
self.${id}.set_line_color(i, colors[i])
self.${id}.set_line_style(i, styles[i])
self.${id}.set_line_marker(i, markers[i])
self.${id}.set_line_alpha(i, alphas[i])
% endif
${win} = sip.wrapinstance(self.${id}.pyqwidget(), Qt.QWidget)
${gui_hint() % win}
documentation: |-
The GUI hint can be used to position the widget within the application. The hint is of the form [tab_id@tab_index]: [row, col, row_span, col_span]. Both the tab specification and the grid position are optional.
file_format: 1
"""
def make_yml():
"""Return the YML file as a string"""
default_colors = [
'blue', 'red', 'green', 'black', 'cyan', 'magenta', 'yellow',
'dark red', 'dark green', 'dark blue'
]
line_params_1 = LINE_PARAMS.format(i=1, i_cplx=1, i_color=default_colors[0])
line_params_1 = re.sub(r' base_key:.*\n', '', line_params_1)
line_params_n = ''.join([
LINE_PARAMS.format(
i=i,
i_cplx=int(math.ceil(float(i)/2)),
i_color=default_colors[(i-1) % len(default_colors)],
)
for i in range(2, 11)
])
return ''.join((
EVERYTHING_BEFORE_PARAMS,
line_params_1,
line_params_n,
EVERYTHING_AFTER_PARAMS,
))
if __name__ == '__main__':
import sys
try:
filename = sys.argv[1]
except IndexError:
filename = __file__[:-3]
data = make_yml()
with open(filename, 'wb') as fp:
fp.write(data.encode())
|
mbr0wn/gnuradio
|
gr-qtgui/grc/qtgui_time_sink_x.block.yml.py
|
Python
|
gpl-3.0
| 11,608
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import sys
import nupic.bindings.engine_internal as engine
from nupic.support.lockattributes import LockAttributesMixin
import functools
basicTypes = ['Byte', 'Int16', 'UInt16', 'Int32', 'UInt32', 'Int64', 'UInt64', 'Real32', 'Real64', 'Handle']
# Import all the array types from engine (there is no HandleArray)
arrayTypes = [t + 'Array' for t in basicTypes[:-1]]
for a in arrayTypes:
exec('from %s import %s as %s' % (engine.__name__, a, a))
# Intercept the default exception handling for the purposes of stripping
# parts of the stack trace that can confuse users. If you want the original
# stack trace define this environment variable
if not 'NTA_STANDARD_PYTHON_UNHANDLED_EXCEPTIONS' in os.environ:
import traceback
import cStringIO
def customExceptionHandler(type, value, tb):
"""Catch unhandled Python exception
The handler prints the original exception info, including the traceback, into a buffer.
It then extracts the original error message (when the exception is raised
inside a Py node, additional stacktrace info will be appended at the end)
and saves the original exception to a file called error.txt. It prints
just the error message to the screen and tells the user about the error.txt
file.
"""
# Print the exception info to a string IO buffer for manipulation
buff = cStringIO.StringIO()
traceback.print_exception(type, value, tb, file=buff)
text = buff.getvalue()
# get the lines skip the first one: "Traceback (most recent call last)"
lines = text.split('\n')[1:]
#
# Extract the error message
begin = 0
end = len(lines)
for i, line in enumerate(lines):
if line.startswith('RuntimeError:'):
begin = i
#
# elif line.startswith('Traceback (most recent call last):'):
# end = i
# break
#
message = '\n'.join(lines[begin:end])
message = message[len('RuntimeError:'):]
#stacktrace = lines[end:]
# Get the stack trace if available (default to empty string)
stacktrace = getattr(value, 'stackTrace', '')
# Remove engine from stack trace
lines = [x for x in lines if 'engine' not in x]
failMessage = 'The program failed with the following error message:'
dashes = '-' * len(failMessage)
print
print dashes
print 'Traceback (most recent call last):'
print '\n'.join(lines[:begin-2])
if stacktrace:
print stacktrace
print dashes
print 'The program failed with the following error message:'
print dashes
print message
print
#sys.excepthook = customExceptionHandler
# ------------------------------
#
# T I M E R
#
# ------------------------------
# Expose the timer class directly
# Do it this way instead of bringing engine.Timer
# into the namespace to avoid engine
# in the class name
class Timer(engine.Timer):
pass
# ------------------------------
#
# O S
#
# ------------------------------
# Expose the os class directly
# The only wrapped method is getProcessMemoryUsage()
class OS(engine.OS):
pass
# ------------------------------
#
# D I M E N S I O N S
#
# ------------------------------
class Dimensions(engine.Dimensions):
"""Represent the topology of an N-dimensional region
Basically, it is a list of integers such as: [4, 8, 6]
In this example the topology is a 3 dimensional region with
4 x 8 x 6 nodes.
You can initialize it with a list of dimensions or with no arguments
and then append dimensions.
"""
def __init__(self, *args):
"""Construct a Dimensions object
The constructor can be called with no arguments or with a list
of integers
"""
# Init the base class
engine.Dimensions.__init__(self, *args)
def __str__(self):
return self.toString()
# ------------------------------
#
# A R R A Y
#
# ------------------------------
def Array(dtype, size=None, ref=False):
"""Factory function that creates typed Array or ArrayRef objects
dtype - the data type of the array (as string).
Supported types are: Byte, Int16, UInt16, Int32, UInt32, Int64, UInt64, Real32, Real64
size - the size of the array. Must be positive integer.
"""
def getArrayType(self):
"""A little function to replace the getType() method of arrays
It returns a string representation of the array element type instead of the
integer value (NTA_BasicType enum) returned by the original array
"""
return self._dtype
# ArrayRef can't be allocated
if ref:
assert size is None
# list.index() raises ValueError rather than returning -1, so check membership first
if dtype not in basicTypes:
raise Exception('Invalid data type: ' + dtype)
index = basicTypes.index(dtype)
if size and size <= 0:
raise Exception('Array size must be positive')
suffix = 'ArrayRef' if ref else 'Array'
arrayFactory = getattr(engine, dtype + suffix)
arrayFactory.getType = getArrayType
if size:
a = arrayFactory(size)
else:
a = arrayFactory()
a._dtype = basicTypes[index]
return a
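# A small usage sketch (illustrative values only):
#
#   a = Array('Int32', 3)   # typed engine array with three elements
#   a[0] = 42
#   print a.getType()       # prints 'Int32' via the patched getType()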
def ArrayRef(dtype):
return Array(dtype, None, True)
# -------------------------------------
#
# C O L L E C T I O N W R A P P E R
#
# -------------------------------------
class CollectionIterator(object):
def __init__(self, collection):
self.collection = collection
self.index = 0
def next(self):
index = self.index
if index == self.collection.getCount():
raise StopIteration
self.index += 1
return self.collection.getByIndex(index)[0]
class CollectionWrapper(object):
"""Wrap an nupic::Collection with a dict-like interface
The optional valueWrapper is used to wrap values for adaptation purposes.
Maintains the original documentation
collection - the original collection
valueWrapper - an optional callable object used to wrap values.
"""
def IdentityWrapper(o):
return o
def __init__(self, collection, valueWrapper=IdentityWrapper):
self.collection = collection
self.valueWrapper = valueWrapper
self.__class__.__doc__ = collection.__class__.__doc__
def __iter__(self):
return CollectionIterator(self.collection)
def __str__(self):
return str(self.collection)
def __repr__(self):
return repr(self.collection)
def __len__(self):
return self.collection.getCount()
def __getitem__(self, key):
if not self.collection.contains(key):
raise KeyError('Key ' + key + ' not found')
value = self.collection.getByName(key)
value = self.valueWrapper(key, value)
return value
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __contains__(self, key):
return self.collection.contains(key)
def keys(self):
keys = set()
for i in range(self.collection.getCount()):
keys.add(self.collection.getByIndex(i)[0])
return keys
def values(self):
values = set()
for i in range(self.collection.getCount()):
p = self.collection.getByIndex(i)
values.add(self.valueWrapper(p[0], p[1]))
return values
def items(self):
items = set()
for i in range(self.collection.getCount()):
p = self.collection.getByIndex(i)
items.add((p[0], self.valueWrapper(p[0], p[1])))
return items
def __cmp__(self, other):
return self.collection == other.collection
def __hash__(self):
return hash(self.collection)
# -----------------------------
#
# S P E C I T E M
#
# -----------------------------
class SpecItem(object):
"""Wrapper that translates the data type and access code to a string
The original values are an enumerated type in C++ that become
just integers in Python. This class wraps the original ParameterSpec
and translates the integer values to meaningful strings that correspond to the C++ enum labels.
It is used to wrap ParameterSpec, InputSpec and OutputSpec
"""
accessModes = ['Create', 'ReadOnly', 'ReadWrite']
def __init__(self, name, item):
self.name = name
self.item = item
self.__class__.__doc__ = item.__class__.__doc__
# Translate data type to string representation
self.dataType = basicTypes[item.dataType]
# Translate access mode to string representation
if hasattr(item, 'accessMode'): # ParameterSpec only
self.accessMode = SpecItem.accessModes[item.accessMode]
def __getattr__(self, name):
return getattr(self.item, name)
def __str__(self):
d = dict(name=self.name,
description=self.description,
dataType=self.dataType,
count=self.count)
if hasattr(self.item, 'accessMode'): # ParameterSpec only
self.accessMode = SpecItem.accessModes[self.item.accessMode]
if hasattr(self.item, 'accessMode'): # ParameterSpec only
d['accessMode'] = self.accessMode
if hasattr(self.item, 'constraints'): # ParameterSpec only
d['constraints'] = self.constraints
if hasattr(self.item, 'defaultValue'): # ParameterSpec only
d['defaultValue'] = self.defaultValue
return str(d)
# -------------------
#
# S P E C
#
# -------------------
class Spec(object):
def __init__(self, spec):
self.spec = spec
self.__class__.__doc__ = spec.__class__.__doc__
self.description = spec.description
self.singleNodeOnly = spec.singleNodeOnly
self.inputs = CollectionWrapper(spec.inputs, SpecItem)
self.outputs = CollectionWrapper(spec.outputs, SpecItem)
self.parameters = CollectionWrapper(spec.parameters, SpecItem)
self.commands = CollectionWrapper(spec.commands)
def __str__(self):
return self.spec.toString()
def __repr__(self):
return self.spec.toString()
class _ArrayParameterHelper:
"""This class is used by Region._getParameterMethods"""
def __init__(self, region, datatype):
self._region = region
self.datatype = basicTypes[datatype]
def getParameterArray(self, paramName):
# return a PyArray instead of a plain array.
# PyArray constructor/class for type X is called XArray()
#factoryName = self.datatype + 'Array'
#if factoryName not in globals():
# import exceptions
# raise exceptions.Exception("Internal error -- did not find %s constructor in engine" % factoryName)
#
#arrayFactory = globals()[factoryName]
#a = arrayFactory();
a = Array(self.datatype)
self._region.getParameterArray(paramName, a)
return a
# -------------------------------------
#
# R E G I O N
#
# -------------------------------------
class Region(LockAttributesMixin):
"""
@doc:place_holder(Region.description)
"""
#Wrapper for a network region
#- Maintains original documentation
#- Implement syntactic sugar properties:
#name = property(getName)
#type = property(getType)
#spec = property(getSpec)
#dimensions = property(getDimensions, setDimensions)
#network = property(getNetwork)
#- Makes sure that returned objects are high-level wrapper objects
#- Forwards everything else to internal region
def __init__(self, region, network):
"""Store the wraped region and hosting network
The network is the high-level Network and not the internal
Network. This is important in case the user requests the network
from the region (never leak an engine object, remember)
"""
self._network = network
self._region = region
self.__class__.__doc__ = region.__class__.__doc__
# A cache for typed get/setParameter() calls
self._paramTypeCache = {}
def __getattr__(self, name):
if not '_region' in self.__dict__:
raise AttributeError
return getattr(self._region, name)
def __setattr__(self, name, value):
if name in ('_region', '__class__', '_network'):
self.__dict__[name] = value
elif name == 'dimensions':
self.setDimensions(value)
else:
setattr(self._region, name, value)
@staticmethod
def getSpecFromType(nodeType):
"""
@doc:place_holder(Region.getSpecFromType)
"""
return Spec(engine.Region.getSpecFromType(nodeType))
def compute(self):
"""
@doc:place_holder(Region.compute)
** This line comes from the original docstring (not generated by Documentor)
"""
return self._region.compute()
def getInputData(self, inputName):
"""
@doc:place_holder(Region.getInputData)
"""
return self._region.getInputArray(inputName)
def getOutputData(self, outputName):
"""
@doc:place_holder(Region.getOutputData)
"""
return self._region.getOutputArray(outputName)
def executeCommand(self, args):
"""
@doc:place_holder(Region.executeCommand)
"""
return self._region.executeCommand(args)
def _getSpec(self):
"""Spec of the region"""
return Spec(self._region.getSpec())
def _getDimensions(self):
"""Dimensions of the region"""
return Dimensions(tuple(self._region.getDimensions()))
def _getNetwork(self):
"""Network for the region"""
return self._network
def __hash__(self):
"""Hash a region"""
return self._region.__hash__()
def __cmp__(self, other):
"""Compare regions"""
return self._region == other._region
def _getParameterMethods(self, paramName):
"""Returns functions to set/get the parameter. These are
the strongly typed functions get/setParameterUInt32, etc.
The return value is a pair:
setfunc, getfunc
If the parameter is not available on this region, setfunc/getfunc
are None. """
if paramName in self._paramTypeCache:
return self._paramTypeCache[paramName]
try:
# Catch the error here. We will re-throw in getParameter or
# setParameter with a better error message than we could generate here
paramSpec = self.getSpec().parameters.getByName(paramName)
except:
return (None, None)
dataType = paramSpec.dataType
dataTypeName = basicTypes[dataType]
count = paramSpec.count
if count == 1:
# Dynamically generate the proper typed get/setParameter<dataType>
x = 'etParameter' + dataTypeName
try:
g = getattr(self, 'g' + x) # get the typed getParameter method
s = getattr(self, 's' + x) # get the typed setParameter method
except AttributeError:
raise Exception("Internal error: unknown parameter type %s" % dataTypeName)
info = (s, g)
else:
if dataTypeName == "Byte":
info = (self.setParameterString, self.getParameterString)
else:
helper = _ArrayParameterHelper(self, dataType)
info = (self.setParameterArray, helper.getParameterArray)
self._paramTypeCache[paramName] = info
return info
def getParameter(self, paramName):
"""Get parameter value"""
(setter, getter) = self._getParameterMethods(paramName)
if getter is None:
import exceptions
raise exceptions.Exception("getParameter -- parameter name '%s' does not exist in region %s of type %s" %
(paramName, self.name, self.type))
return getter(paramName)
def setParameter(self, paramName, value):
"""Set parameter value"""
(setter, getter) = self._getParameterMethods(paramName)
if setter is None:
import exceptions
raise exceptions.Exception("setParameter -- parameter name '%s' does not exist in region %s of type %s" %
(paramName, self.name, self.type))
setter(paramName, value)
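    # A brief usage sketch (the parameter name is hypothetical and depends on
    # the region's Spec):
    #
    #   count = region.getParameter('count')
    #   region.setParameter('count', count + 1)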
def _get(self, method):
"""Auto forwarding of properties to get methods of internal region"""
return getattr(self._region, method)()
network = property(_getNetwork,
doc='@property:place_holder(Region.getNetwork)')
name = property(functools.partial(_get, method='getName'),
doc="@property:place_holder(Region.getName)")
type = property(functools.partial(_get, method='getType'),
doc='@property:place_holder(Region.getType)')
spec = property(_getSpec,
doc='@property:place_holder(Region.getSpec)')
dimensions = property(_getDimensions,
engine.Region.setDimensions,
doc='@property:place_holder(Region.getDimensions)')
computeTimer = property(functools.partial(_get, method='getComputeTimer'),
doc='@property:place_holder(Region.getComputeTimer)')
executeTimer = property(functools.partial(_get, method='getExecuteTimer'),
doc='@property:place_holder(Region.getExecuteTimer)')
# ------------------------------
#
# N E T W O R K
#
# ------------------------------
class Network(engine.Network):
"""
@doc:place_holder(Network.description)
"""
def __init__(self, *args):
"""Constructor
- Initialize the internal engine.Network class generated by Swig
- Attach docstrings to selected methods
"""
# Init engine.Network class
engine.Network.__init__(self, *args)
# Prepare documentation table.
# Each item is pair of method/property, docstring
# The docstring is attached later to the method or property.
# The key for method items is the method object of the engine.Network class.
# The key for properties is the property name
docTable = (
(engine.Network.getRegions, 'Get the collection of regions in a network'),
)
# Attach documentation to methods and properties
for obj, docString in docTable:
if isinstance(obj, str):
prop = getattr(Network, obj)
assert isinstance(prop, property)
setattr(Network, obj, property(prop.fget, prop.fset, prop.fdel, docString))
else:
obj.im_func.__doc__ = docString
def _getRegions(self):
"""Get the collection of regions in a network
This is a tricky one. The collection of regions returned from
the internal network is a collection of internal regions.
The desired collection is a collection of net.Region objects
that also point to this network (net.network) and not to
the internal network. To achieve that, a CollectionWrapper
class is used with a custom makeRegion() function (see below)
as a value wrapper. The CollectionWrapper class wraps each value in the
original collection with the result of the valueWrapper.
"""
def makeRegion(name, r):
"""Wrap a engine region with a nupic.engine.Region
Also passes the containing nupic.engine.Network network in _network. This
function is passed a value wrapper to the CollectionWrapper
"""
r = Region(r, self)
#r._network = self
return r
regions = CollectionWrapper(engine.Network.getRegions(self), makeRegion)
return regions
def addRegion(self, name, nodeType, nodeParams):
"""
@doc:place_holder(Network.addRegion)
"""
engine.Network.addRegion(self, name, nodeType, nodeParams)
return self._getRegions()[name]
def addRegionFromBundle(self, name, nodeType, dimensions, bundlePath, label):
"""
@doc:place_holder(Network.addRegionFromBundle)
"""
engine.Network.addRegionFromBundle(self,
name,
nodeType,
dimensions,
bundlePath,
label)
return self._getRegions()[name]
def setPhases(self, name, phases):
"""
@doc:place_holder(Network.setPhases)
"""
phases = engine.UInt32Set(phases)
engine.Network.setPhases(self, name, phases)
def run(self, n):
"""
@doc:place_holder(Network.run)
"""
#Just forward to the internal network
#This is needed for inspectors to work properly because they wrap some key
#methods such as 'run'.
engine.Network.run(self, n)
def disableProfiling(self, *args, **kwargs):
"""
@doc:place_holder(Network.disableProfiling)
"""
engine.Network.disableProfiling(self, *args, **kwargs)
def enableProfiling(self, *args, **kwargs):
"""
@doc:place_holder(Network.enableProfiling)
"""
engine.Network.enableProfiling(self, *args, **kwargs)
def getCallbacks(self, *args, **kwargs):
"""
@doc:place_holder(Network.getCallbacks)
"""
engine.Network.getCallbacks(self, *args, **kwargs)
def initialize(self, *args, **kwargs):
"""
@doc:place_holder(Network.initialize)
"""
engine.Network.initialize(self, *args, **kwargs)
def link(self, *args, **kwargs):
"""
@doc:place_holder(Network.link)
"""
engine.Network.link(self, *args, **kwargs)
def removeLink(self, *args, **kwargs):
"""
@doc:place_holder(Network.removeLink)
"""
engine.Network.removeLink(self, *args, **kwargs)
def removeRegion(self, *args, **kwargs):
"""
@doc:place_holder(Network.removeRegion)
"""
engine.Network.removeRegion(self, *args, **kwargs)
def resetProfiling(self, *args, **kwargs):
"""
@doc:place_holder(Network.resetProfiling)
"""
engine.Network.resetProfiling(self, *args, **kwargs)
def save(self, *args, **kwargs):
"""
@doc:place_holder(Network.save)
"""
engine.Network.save(self, *args, **kwargs)
def inspect(self):
"""Launch a GUI inpector to inspect the network"""
from nupic.analysis import inspect
inspect(self)
# Syntactic sugar properties
regions = property(_getRegions, doc='@property:place_holder(Network.getRegions)')
minPhase = property(engine.Network.getMinPhase, doc='@property:place_holder(Network.getMinPhase)')
maxPhase = property(engine.Network.getMaxPhase, doc='@property:place_holder(Network.getMaxPhase)')
minEnabledPhase = property(engine.Network.getMinEnabledPhase, engine.Network.setMinEnabledPhase, doc='@property:place_holder(Network.getMinEnabledPhase)')
maxEnabledPhase = property(engine.Network.getMaxEnabledPhase, engine.Network.setMaxEnabledPhase, doc='@property:place_holder(Network.getMaxEnabledPhase)')
if __name__=='__main__':
n = Network()
print n.regions
print len(n.regions)
print Network.regions.__doc__
d = Dimensions([3, 4, 5])
print len(d)
print d
a = Array('Byte', 5)
print len(a)
for i in range(len(a)):
a[i] = ord('A') + i
for i in range(len(a)):
print a[i]
r = n.addRegion('r', 'TestNode', '')
print 'name:', r.name
print 'node type:', r.type
print 'node spec:', r.spec
|
chetan51/nupic
|
nupic/engine/__init__.py
|
Python
|
gpl-3.0
| 23,538
|
# Core imports
import copy
import Queue
import thread
import time
# Package imports
from ..core.Threadable import Threadable
from ..reconstruction.Buffer import Buffer
from ..settings import Arguments
from Packet import Packet
from TDMA_Scheduler import TDMA_Scheduler
# pylint: disable=undefined-all-variable
__all__ = [
"RF_Sensor_Simulator", "RF_Sensor_Physical_XBee",
"RF_Sensor_Physical_Texas_Instruments"
]
class RSSI_Validity_Request(object):
"""
A request to the valid location callback, containing properties that the
valid location callback can use to track the measurement validity.
"""
def __init__(self, specification, other_id=None, other_valid=None,
other_valid_pair=None, other_index=None):
self._is_broadcast = specification == "rssi_broadcast"
self._other_valid = other_valid
self._other_id = other_id
self._other_index = other_index
self._other_valid_pair = other_valid_pair
@property
def is_broadcast(self):
"""
Whether the RSSI packet is a measurement broadcast packet, as opposed to
one sent to the ground station.
"""
return self._is_broadcast
@property
def other_id(self):
"""
RF sensor ID of the other sensor.
"""
return self._other_id
@property
def other_valid(self):
"""
Whether the location of the other sensor is valid.
"""
return self._other_valid
@property
def other_valid_pair(self):
"""
Whether the other sensor has received a valid measurement from the
current sensor.
"""
return self._other_valid_pair
@property
def other_index(self):
"""
The waypoint index of the other sensor.
"""
return self._other_index
class DisabledException(Exception):
"""
Special exception indicating that the RF sensor was disabled during
execution of the sensor loop.
"""
pass
class RF_Sensor(Threadable):
"""
Base class for all RF sensors.
This class is responsible for setting up the basic characteristics of an
RF sensor and contains common code for the simulated and physical
specializations.
"""
def __init__(self, arguments, thread_manager, location_callback,
receive_callback, valid_callback):
"""
Initialize the RF sensor.
The `arguments` parameter is used to load settings for a specific RF
sensor type. The sensor has a `thread_manager`, which is a `Thread_Manager`
object for registering its own thread loop. Additionally, it requires
certain callbacks. The `location_callback` is called whenever the sensor
needs to know its own location for the "rssi_broadcast" and the
"rssi_ground_station" private packets. The `receive_callback` is called
whenever non-private packets are received and has the `Packet` object
as an argument. Finally, the `valid_callback` is called shortly after
the `location_callback` is called. It may be given a boolean argument
indicating whether another RF sensor has a valid location, but only when
creating the "rssi_ground_station" private packet. This is used by the
callback to determine if measurements at a certain location are finished.
Classes that inherit this base class may extend this method.
"""
super(RF_Sensor, self).__init__("rf_sensor", thread_manager)
# Make sure that the provided callbacks are callable.
for callback in [location_callback, receive_callback, valid_callback]:
if not hasattr(callback, "__call__"):
raise TypeError("Provided RF sensor callback is not callable")
# Load settings for a specific RF sensor type.
if isinstance(arguments, Arguments):
self._settings = arguments.get_settings(self.type)
else:
raise ValueError("'arguments' must be an instance of Arguments")
# Initialize common member variables.
self._id = self._settings.get("rf_sensor_id")
self._number_of_sensors = self._settings.get("number_of_sensors")
self._address = None
self._connection = None
self._buffer = None
self._scheduler = TDMA_Scheduler(self._id, arguments)
self._packets = Queue.Queue()
self._custom_packets = Queue.Queue()
self._joined = False
self._activated = False
self._started = False
self._loop_delay = self._settings.get("loop_delay")
self._location_callback = location_callback
self._receive_callback = receive_callback
self._valid_callback = valid_callback
@property
def id(self):
"""
Get the ID of the RF sensor.
"""
return self._id
@property
def number_of_sensors(self):
"""
Get the number of sensors in the network.
"""
return self._number_of_sensors
@property
def buffer(self):
"""
Get the buffer of the RF sensor.
"""
return self._buffer
@buffer.setter
def buffer(self, buffer):
"""
Set the buffer.
The `buffer` argument must be a `Buffer` object.
"""
if not isinstance(buffer, Buffer):
raise ValueError("The `buffer` argument must be a `Buffer` object")
self._buffer = buffer
@property
def type(self):
raise NotImplementedError("Subclasses must implement the `type` property")
@property
def identity(self):
"""
Get the identity of the RF sensor, consisting of its ID, address and
network join status.
Classes that inherit this base class may extend this property.
"""
return {
"id": self._id,
"address": self._address,
"joined": self._joined
}
def activate(self):
"""
Activate the sensor to start sending and receiving packets.
Classes that inherit this base class may extend this method.
"""
super(RF_Sensor, self).activate()
if not self._activated:
self._activated = True
if self._connection is None:
self._setup()
thread.start_new_thread(self._loop, ())
def deactivate(self):
"""
Deactivate the sensor to stop sending and receiving packets.
Classes that inherit this base class may extend this method.
"""
super(RF_Sensor, self).deactivate()
if self._activated:
self._activated = False
if self._connection is not None:
# Close the connection and clean up so that the thread might get
# the signal faster and we can correctly reactivate later on.
self._connection.close()
self._connection = None
def start(self):
"""
Start the signal strength measurements (and stop sending custom packets).
Classes that inherit this base class may extend this method.
"""
self._scheduler.update()
self._packets = Queue.Queue()
self._started = True
def stop(self):
"""
Stop the signal strength measurements (and start sending custom packets).
"""
self._started = False
# Reset the scheduler timestamp so that it updates correctly in case we
# restart the sensor measurements.
self._scheduler.timestamp = 0
def enqueue(self, packet, to=None):
"""
Enqueue a custom `packet` to send `to` another RF sensor in the network.
"""
if not isinstance(packet, Packet):
raise TypeError("Only `Packet` objects can be enqueued")
if packet.is_private():
raise ValueError("Private packets cannot be enqueued")
if to is None:
# No destination ID has been provided, so we broadcast the packet to
# all sensors in the network except for ourself and the ground station.
for to_id in xrange(1, self._number_of_sensors + 1):
if to_id == self._id:
continue
self._custom_packets.put({
"packet": copy.deepcopy(packet),
"to": to_id
})
else:
self._custom_packets.put({
"packet": packet,
"to": to
})
def discover(self, callback, required_sensors=None):
"""
Discover RF sensors in the network. The `callback` callable function is
called when an RF sensor reports its identity. The `required_sensors`
set indicates which sensors should be discovered; if it is not
provided, then all RF sensors are discovered. Note that discovery may
fail due to interference or disabled sensors.
Classes that inherit this base class must extend this method.
"""
if not hasattr(callback, "__call__"):
raise TypeError("Provided discovery callback is not callable")
if isinstance(required_sensors, set):
if not required_sensors.issubset(range(1, self._number_of_sensors + 1)):
raise ValueError("Provided required sensors may only contain vehicle sensors")
elif required_sensors is not None:
raise TypeError("Provided required sensors must be a `set`")
def _setup(self):
raise NotImplementedError("Subclasses must implement `_setup()`")
def _loop(self):
"""
Execute the sensor loop. This runs in a separate thread.
"""
try:
while self._activated:
self._loop_body()
except DisabledException:
return
except:
super(RF_Sensor, self).interrupt()
def _loop_body(self):
"""
Body of the sensor loop.
This is extracted into a separate method to make testing easier, as well
as for keeping the `_loop` implementation in the base class.
Classes that inherit this base class must extend this method.
"""
# If the sensor has been activated, we only send enqueued custom packets.
# If the sensor has been started, we stop sending custom packets and
# start performing signal strength measurements.
if not self._started:
self._send_custom_packets()
elif self._id > 0 and self._scheduler.in_slot:
self._send()
self._scheduler.update()
time.sleep(self._loop_delay)
def _send(self):
"""
Send a broadcast packet to each other sensor in the network and
send collected packets to the ground station.
Classes that inherit this base class may extend this method.
"""
# Create and send the RSSI broadcast packets.
for to_id in xrange(1, self._number_of_sensors + 1):
if not self._scheduler.in_slot:
return
if to_id == self._id:
continue
packet = self._create_rssi_broadcast_packet(to_id)
self._send_tx_frame(packet, to_id)
# Send collected packets to the ground station.
while not self._packets.empty() and self._scheduler.in_slot:
packet = self._packets.get()
self._send_tx_frame(packet, 0)
def _send_custom_packets(self):
"""
Send custom packets to their destinations.
"""
while not self._custom_packets.empty():
item = self._custom_packets.get()
self._send_tx_frame(item["packet"], item["to"])
def _send_tx_frame(self, packet, to=None):
"""
Send a TX frame with `packet` as payload `to` another sensor.
Classes that inherit this base class must extend this method.
"""
if self._connection is None:
raise DisabledException
if not isinstance(packet, Packet):
raise TypeError("Only `Packet` objects can be sent")
if to is None:
raise TypeError("Invalid destination '{}' has been provided".format(to))
# Introduce a short delay to give the hardware more time to send
# packets when this method is called many times in a row.
time.sleep(self._loop_delay)
def _receive(self, packet=None):
raise NotImplementedError("Subclasses must implement `_receive(packet=None)`")
def _create_rssi_broadcast_packet(self, to_id):
"""
Create a `Packet` object according to the "rssi_broadcast" specification.
The resulting packet is complete.
"""
location, waypoint_index = self._location_callback()
request = RSSI_Validity_Request("rssi_broadcast", other_id=to_id)
valid, valid_pair = self._valid_callback(request)
packet = Packet()
packet.set("specification", "rssi_broadcast")
packet.set("latitude", location[0])
packet.set("longitude", location[1])
packet.set("valid", valid)
packet.set("valid_pair", valid_pair)
packet.set("waypoint_index", waypoint_index)
packet.set("sensor_id", self._id)
packet.set("timestamp", time.time())
return packet
def _create_rssi_ground_station_packet(self, rssi_broadcast_packet):
"""
Create a `Packet` object according to the "rssi_ground_station"
specification using data from an `rssi_broadcast_packet`. The
`rssi_broadcast_packet` must be created according to the
"rssi_broadcast" specification.
The resulting packet is only missing RSSI information.
The packet can be sent to the ground station as an indication of the
signal strength between the RF sensor that sent the `rssi_broadcast_packet`
and the current RF sensor.
"""
from_valid = rssi_broadcast_packet.get("valid")
from_id = rssi_broadcast_packet.get("sensor_id")
from_waypoint_index = rssi_broadcast_packet.get("waypoint_index")
from_valid_pair = rssi_broadcast_packet.get("valid_pair")
location = self._location_callback()[0]
request = RSSI_Validity_Request("rssi_ground_station", other_id=from_id,
other_valid=from_valid,
other_valid_pair=from_valid_pair,
other_index=from_waypoint_index)
to_valid = self._valid_callback(request)[0]
packet = Packet()
packet.set("specification", "rssi_ground_station")
packet.set("sensor_id", self._id)
packet.set("from_latitude", rssi_broadcast_packet.get("latitude"))
packet.set("from_longitude", rssi_broadcast_packet.get("longitude"))
packet.set("from_valid", from_valid)
packet.set("to_latitude", location[0])
packet.set("to_longitude", location[1])
packet.set("to_valid", to_valid)
return packet
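# A minimal subclass sketch showing the hooks a concrete sensor is expected
# to provide (the class name and connection object are hypothetical, not part
# of this module):
#
#   class RF_Sensor_Dummy(RF_Sensor):
#       @property
#       def type(self):
#           return "rf_sensor_dummy"
#
#       def _setup(self):
#           self._connection = object()  # stand-in for a real device handle
#
#       def _send_tx_frame(self, packet, to=None):
#           super(RF_Sensor_Dummy, self)._send_tx_frame(packet, to)
#           # hand the packet to the hardware or simulation here
#
#       def _receive(self, packet=None):
#           # forward received packets to the registered callback
#           self._receive_callback(packet)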
|
timvandermeij/mobile-radio-tomography
|
zigbee/RF_Sensor.py
|
Python
|
gpl-3.0
| 15,115
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
try:
import ovirtsdk4.types as otypes
from ovirtsdk4.types import StorageDomainStatus as sdstate
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
wait,
)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_storage_domains
short_description: Module to manage storage domains in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage storage domains in oVirt"
options:
name:
description:
- "Name of the the storage domain to manage."
state:
description:
- "Should the storage domain be present/absent/maintenance/unattached"
choices: ['present', 'absent', 'maintenance', 'unattached']
default: present
description:
description:
- "Description of the storage domain."
comment:
description:
- "Comment of the storage domain."
data_center:
description:
- "Data center name where storage domain should be attached."
- "This parameter isn't idempotent, it's not possible to change data center of storage domain."
domain_function:
description:
- "Function of the storage domain."
- "This parameter isn't idempotent, it's not possible to change domain function of storage domain."
choices: ['data', 'iso', 'export']
default: 'data'
aliases: ['type']
host:
description:
- "Host to be used to mount storage."
nfs:
description:
- "Dictionary with values for NFS storage type:"
- "C(address) - Address of the NFS server. E.g.: myserver.mydomain.com"
- "C(path) - Path of the mount point. E.g.: /path/to/my/data"
- "C(version) - NFS version. One of: I(auto), I(v3), I(v4) or I(v4_1)."
- "C(timeout) - The time in tenths of a second to wait for a response before retrying NFS requests. Range 0 to 65535."
- "C(retrans) - The number of times to retry a request before attempting further recovery actions. Range 0 to 65535."
- "Note that these parameters are not idempotent."
iscsi:
description:
- "Dictionary with values for iSCSI storage type:"
- "C(address) - Address of the iSCSI storage server."
- "C(port) - Port of the iSCSI storage server."
- "C(target) - The target IQN for the storage device."
- "C(lun_id) - LUN id."
- "C(username) - A CHAP user name for logging into a target."
- "C(password) - A CHAP password for logging into a target."
- "Note that these parameters are not idempotent."
posixfs:
description:
- "Dictionary with values for PosixFS storage type:"
- "C(path) - Path of the mount point. E.g.: /path/to/my/data"
- "C(vfs_type) - Virtual File System type."
- "C(mount_options) - Option which will be passed when mounting storage."
- "Note that these parameters are not idempotent."
glusterfs:
description:
- "Dictionary with values for GlusterFS storage type:"
- "C(address) - Address of the NFS server. E.g.: myserver.mydomain.com"
- "C(path) - Path of the mount point. E.g.: /path/to/my/data"
- "C(mount_options) - Option which will be passed when mounting storage."
- "Note that these parameters are not idempotent."
fcp:
description:
- "Dictionary with values for fibre channel storage type:"
- "C(address) - Address of the fibre channel storage server."
- "C(port) - Port of the fibre channel storage server."
- "C(lun_id) - LUN id."
- "Note that these parameters are not idempotent."
destroy:
description:
- "Logical remove of the storage domain. If I(true) retains the storage domain's data for import."
- "This parameter is relevant only when C(state) is I(absent)."
format:
description:
- "If I(True) storage domain will be formatted after removing it from oVirt."
- "This parameter is relevant only when C(state) is I(absent)."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add data NFS storage domain
- ovirt_storage_domains:
name: data_nfs
host: myhost
data_center: mydatacenter
nfs:
address: 10.34.63.199
path: /path/data
# Add data iSCSI storage domain:
- ovirt_storage_domains:
name: data_iscsi
host: myhost
data_center: mydatacenter
iscsi:
target: iqn.2016-08-09.domain-01:nickname
lun_id: 1IET_000d0002
address: 10.34.63.204
# Import export NFS storage domain:
- ovirt_storage_domains:
domain_function: export
host: myhost
data_center: mydatacenter
nfs:
address: 10.34.63.199
path: /path/export
# Create ISO NFS storage domain
- ovirt_storage_domains:
name: myiso
domain_function: iso
host: myhost
data_center: mydatacenter
nfs:
address: 10.34.63.199
path: /path/iso
# Remove storage domain
- ovirt_storage_domains:
state: absent
name: mystorage_domain
format: true
'''
RETURN = '''
id:
description: ID of the storage domain which is managed
returned: On success if storage domain is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
storage_domain:
description: "Dictionary of all the storage domain attributes. Storage domain attributes can be found on your oVirt instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/storage_domain."
returned: On success if storage domain is found.
'''
class StorageDomainModule(BaseModule):
def _get_storage_type(self):
for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp']:
if self._module.params.get(sd_type) is not None:
return sd_type
def _get_storage(self):
for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp']:
if self._module.params.get(sd_type) is not None:
return self._module.params.get(sd_type)
def _login(self, storage_type, storage):
if storage_type == 'iscsi':
hosts_service = self._connection.system_service().hosts_service()
host = search_by_name(hosts_service, self._module.params['host'])
hosts_service.host_service(host.id).iscsi_login(
iscsi=otypes.IscsiDetails(
username=storage.get('username'),
password=storage.get('password'),
address=storage.get('address'),
target=storage.get('target'),
),
)
def build_entity(self):
storage_type = self._get_storage_type()
storage = self._get_storage()
self._login(storage_type, storage)
return otypes.StorageDomain(
name=self._module.params['name'],
description=self._module.params['description'],
comment=self._module.params['comment'],
type=otypes.StorageDomainType(
self._module.params['domain_function']
),
host=otypes.Host(
name=self._module.params['host'],
),
storage=otypes.HostStorage(
type=otypes.StorageType(storage_type),
logical_units=[
otypes.LogicalUnit(
id=storage.get('lun_id'),
address=storage.get('address'),
port=storage.get('port', 3260),
target=storage.get('target'),
username=storage.get('username'),
password=storage.get('password'),
),
] if storage_type in ['iscsi', 'fcp'] else None,
mount_options=storage.get('mount_options'),
vfs_type=storage.get('vfs_type'),
address=storage.get('address'),
path=storage.get('path'),
nfs_retrans=storage.get('retrans'),
nfs_timeo=storage.get('timeout'),
nfs_version=otypes.NfsVersion(
storage.get('version')
) if storage.get('version') else None,
) if storage_type is not None else None
)
def _attached_sds_service(self):
# Get data center object of the storage domain:
dcs_service = self._connection.system_service().data_centers_service()
dc = search_by_name(dcs_service, self._module.params['data_center'])
if dc is None:
return
dc_service = dcs_service.data_center_service(dc.id)
return dc_service.storage_domains_service()
def _maintenance(self, storage_domain):
attached_sds_service = self._attached_sds_service()
if attached_sds_service is None:
return
attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id)
attached_sd = attached_sd_service.get()
if attached_sd and attached_sd.status != sdstate.MAINTENANCE:
if not self._module.check_mode:
attached_sd_service.deactivate()
self.changed = True
wait(
service=attached_sd_service,
condition=lambda sd: sd.status == sdstate.MAINTENANCE,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
)
def _unattach(self, storage_domain):
attached_sds_service = self._attached_sds_service()
if attached_sds_service is None:
return
attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id)
attached_sd = attached_sd_service.get()
if attached_sd and attached_sd.status == sdstate.MAINTENANCE:
if not self._module.check_mode:
# Detach the storage domain:
attached_sd_service.remove()
self.changed = True
# Wait until storage domain is detached:
wait(
service=attached_sd_service,
condition=lambda sd: sd is None,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
)
def pre_remove(self, storage_domain):
# Before removing storage domain we need to put it into maintenance state:
self._maintenance(storage_domain)
# Before removing storage domain we need to detach it from data center:
self._unattach(storage_domain)
def post_create_check(self, sd_id):
storage_domain = self._service.service(sd_id).get()
self._service = self._attached_sds_service()
# If storage domain isn't attached, attach it:
attached_sd_service = self._service.service(storage_domain.id)
if attached_sd_service.get() is None:
self._service.add(
otypes.StorageDomain(
id=storage_domain.id,
),
)
self.changed = True
            # Wait until storage domain is active:
wait(
service=attached_sd_service,
condition=lambda sd: sd.status == sdstate.ACTIVE,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
)
    def unattached_pre_action(self, storage_domain):
        self._service = self._attached_sds_service()
        self._maintenance(storage_domain)
def update_check(self, entity):
return (
equal(self._module.params['comment'], entity.comment) and
equal(self._module.params['description'], entity.description)
)
def failed_state(sd):
return sd.status in [sdstate.UNKNOWN, sdstate.INACTIVE]
def control_state(sd_module):
sd = sd_module.search_entity()
if sd is None:
return
sd_service = sd_module._service.service(sd.id)
if sd.status == sdstate.LOCKED:
wait(
service=sd_service,
condition=lambda sd: sd.status != sdstate.LOCKED,
fail_condition=failed_state,
)
if failed_state(sd):
raise Exception("Not possible to manage storage domain '%s'." % sd.name)
elif sd.status == sdstate.ACTIVATING:
wait(
service=sd_service,
condition=lambda sd: sd.status == sdstate.ACTIVE,
fail_condition=failed_state,
)
elif sd.status == sdstate.DETACHING:
wait(
service=sd_service,
condition=lambda sd: sd.status == sdstate.UNATTACHED,
fail_condition=failed_state,
)
elif sd.status == sdstate.PREPARING_FOR_MAINTENANCE:
wait(
service=sd_service,
condition=lambda sd: sd.status == sdstate.MAINTENANCE,
fail_condition=failed_state,
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent', 'maintenance', 'unattached'],
default='present',
),
name=dict(required=True),
description=dict(default=None),
comment=dict(default=None),
data_center=dict(required=True),
domain_function=dict(choices=['data', 'iso', 'export'], default='data', aliases=['type']),
host=dict(default=None),
nfs=dict(default=None, type='dict'),
iscsi=dict(default=None, type='dict'),
posixfs=dict(default=None, type='dict'),
glusterfs=dict(default=None, type='dict'),
fcp=dict(default=None, type='dict'),
destroy=dict(type='bool', default=False),
format=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
try:
connection = create_connection(module.params.pop('auth'))
storage_domains_service = connection.system_service().storage_domains_service()
storage_domains_module = StorageDomainModule(
connection=connection,
module=module,
service=storage_domains_service,
)
state = module.params['state']
control_state(storage_domains_module)
if state == 'absent':
ret = storage_domains_module.remove(
destroy=module.params['destroy'],
format=module.params['format'],
host=module.params['host'],
)
elif state == 'present':
sd_id = storage_domains_module.create()['id']
storage_domains_module.post_create_check(sd_id)
ret = storage_domains_module.action(
action='activate',
action_condition=lambda s: s.status == sdstate.MAINTENANCE,
wait_condition=lambda s: s.status == sdstate.ACTIVE,
fail_condition=failed_state,
)
elif state == 'maintenance':
sd_id = storage_domains_module.create()['id']
storage_domains_module.post_create_check(sd_id)
ret = storage_domains_module.action(
action='deactivate',
action_condition=lambda s: s.status == sdstate.ACTIVE,
wait_condition=lambda s: s.status == sdstate.MAINTENANCE,
fail_condition=failed_state,
)
elif state == 'unattached':
ret = storage_domains_module.create()
storage_domains_module.pre_remove(
storage_domain=storage_domains_service.service(ret['id']).get()
)
ret['changed'] = storage_domains_module.changed
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=False)
if __name__ == "__main__":
main()
|
traveloka/ansible
|
lib/ansible/modules/cloud/ovirt/ovirt_storage_domains.py
|
Python
|
gpl-3.0
| 17,259
|
#!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_arg import eng_float, intx
from argparse import ArgumentParser
import sys
import numpy
try:
from matplotlib import pyplot
except ImportError:
print("Error: could not from matplotlib import pyplot (http://matplotlib.sourceforge.net/)")
sys.exit(1)
class example_fll(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
data = 2.0*numpy.random.randint(0, 2, N) - 1.0
data = numpy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.fll = digital.fll_band_edge_cc(sps, rolloff, ntaps, bw)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_fll = blocks.vector_sink_c()
self.vsnk_frq = blocks.vector_sink_f()
self.vsnk_phs = blocks.vector_sink_f()
self.vsnk_err = blocks.vector_sink_f()
self.connect(self.src, self.rrc, self.chn, self.fll, self.vsnk_fll)
self.connect(self.rrc, self.vsnk_src)
self.connect((self.fll,1), self.vsnk_frq)
self.connect((self.fll,2), self.vsnk_phs)
self.connect((self.fll,3), self.vsnk_err)
def main():
parser = ArgumentParser(conflict_handler="resolve")
parser.add_argument("-N", "--nsamples", type=int, default=2000,
help="Set the number of samples to process [default=%(default)r]")
parser.add_argument("-S", "--sps", type=int, default=4,
help="Set the samples per symbol [default=%(default)r]")
parser.add_argument("-r", "--rolloff", type=eng_float, default=0.35,
help="Set the rolloff factor [default=%(default)r]")
parser.add_argument("-W", "--bandwidth", type=eng_float, default=2*numpy.pi/100.0,
help="Set the loop bandwidth [default=%(default)r]")
parser.add_argument("-n", "--ntaps", type=int, default=45,
help="Set the number of taps in the filters [default=%(default)r]")
parser.add_argument("--noise", type=eng_float, default=0.0,
help="Set the simulation noise voltage [default=%(default)r]")
parser.add_argument("-f", "--foffset", type=eng_float, default=0.2,
help="Set the simulation's normalized frequency offset (in Hz) [default=%(default)r]")
parser.add_argument("-t", "--toffset", type=eng_float, default=1.0,
help="Set the simulation's timing offset [default=%(default)r]")
parser.add_argument("-p", "--poffset", type=eng_float, default=0.0,
help="Set the simulation's phase offset [default=%(default)r]")
args = parser.parse_args()
# Adjust N for the interpolation by sps
args.nsamples = args.nsamples // args.sps
# Set up the program-under-test
put = example_fll(args.nsamples, args.sps, args.rolloff,
args.ntaps, args.bandwidth, args.noise,
args.foffset, args.toffset, args.poffset)
put.run()
data_src = numpy.array(put.vsnk_src.data())
data_err = numpy.array(put.vsnk_err.data())
# Convert the FLL's LO frequency from rads/sec to Hz
data_frq = numpy.array(put.vsnk_frq.data()) / (2.0*numpy.pi)
# adjust this to align with the data. There are 2 filters of
# ntaps long and the channel introduces another 4 sample delay.
data_fll = numpy.array(put.vsnk_fll.data()[2*args.ntaps-4:])
# Plot the FLL's LO frequency
f1 = pyplot.figure(1, figsize=(12,10))
s1 = f1.add_subplot(2,2,1)
s1.plot(data_frq)
s1.set_title("FLL LO")
s1.set_xlabel("Samples")
s1.set_ylabel("Frequency (normalized Hz)")
# Plot the FLL's error
s2 = f1.add_subplot(2,2,2)
s2.plot(data_err)
s2.set_title("FLL Error")
s2.set_xlabel("Samples")
s2.set_ylabel("FLL Loop error")
# Plot the IQ symbols
s3 = f1.add_subplot(2,2,3)
s3.plot(data_src.real, data_src.imag, "o")
s3.plot(data_fll.real, data_fll.imag, "rx")
s3.set_title("IQ")
s3.set_xlabel("Real part")
s3.set_ylabel("Imag part")
# Plot the symbols in time
s4 = f1.add_subplot(2,2,4)
s4.plot(data_src.real, "o-")
s4.plot(data_fll.real, "rx-")
s4.set_title("Symbols")
s4.set_xlabel("Samples")
s4.set_ylabel("Real Part of Signals")
pyplot.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
jdemel/gnuradio
|
gr-digital/examples/example_fll.py
|
Python
|
gpl-3.0
| 5,057
|
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
source("../../shared/qtcreator.py")
# test New Qt Quick Application build and run for release and debug option
def main():
startApplication("qtcreator" + SettingsPath)
if not startedWithoutPluginError():
return
checkedTargets, projectName = createNewQtQuickApplication(tempDir(), "SampleApp")
# run project for debug and release and verify results
runVerify(checkedTargets)
#close Qt Creator
invokeMenuItem("File", "Exit")
|
pivonroll/Qt_Creator
|
tests/system/suite_APTW/tst_APTW02/test.py
|
Python
|
gpl-3.0
| 1,630
|
"""
Copyright (c) 2012, CCL Forensics
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the CCL Forensics nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CCL FORENSICS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import os
import struct
import datetime
__version__ = "0.11"
__description__ = "Converts Apple binary PList files into a native Python data structure"
__contact__ = "Alex Caithness"
class BplistError(Exception):
pass
class BplistUID:
def __init__(self, value):
self.value = value
def __repr__(self):
return "UID: {0}".format(self.value)
def __str__(self):
return self.__repr__()
def __decode_multibyte_int(b, signed=True):
if len(b) == 1:
fmt = ">B" # Always unsigned?
elif len(b) == 2:
fmt = ">h"
elif len(b) == 3:
if signed:
return ((b[0] << 16) | struct.unpack(">H", b[1:])[0]) - ((b[0] >> 7) * 2 * 0x800000)
else:
return (b[0] << 16) | struct.unpack(">H", b[1:])[0]
elif len(b) == 4:
fmt = ">i"
elif len(b) == 8:
fmt = ">q"
else:
raise BplistError("Cannot decode multibyte int of length {0}".format(len(b)))
if signed and len(b) > 1:
return struct.unpack(fmt.lower(), b)[0]
else:
return struct.unpack(fmt.upper(), b)[0]
def __decode_float(b, signed=True):
if len(b) == 4:
fmt = ">f"
elif len(b) == 8:
fmt = ">d"
else:
raise BplistError("Cannot decode float of length {0}".format(len(b)))
if signed:
return struct.unpack(fmt.lower(), b)[0]
else:
return struct.unpack(fmt.upper(), b)[0]
def __decode_object(f, offset, collection_offset_size, offset_table):
# Move to offset and read type
#print("Decoding object at offset {0}".format(offset))
f.seek(offset)
# A little hack to keep the script portable between py2.x and py3k
if sys.version_info[0] < 3:
type_byte = ord(f.read(1)[0])
else:
type_byte = f.read(1)[0]
#print("Type byte: {0}".format(hex(type_byte)))
if type_byte == 0x00: # Null 0000 0000
return None
elif type_byte == 0x08: # False 0000 1000
return False
elif type_byte == 0x09: # True 0000 1001
return True
elif type_byte == 0x0F: # Fill 0000 1111
raise BplistError("Fill type not currently supported at offset {0}".format(f.tell())) # Not sure what to return really...
elif type_byte & 0xF0 == 0x10: # Int 0001 xxxx
int_length = 2 ** (type_byte & 0x0F)
int_bytes = f.read(int_length)
return __decode_multibyte_int(int_bytes)
elif type_byte & 0xF0 == 0x20: # Float 0010 nnnn
float_length = 2 ** (type_byte & 0x0F)
float_bytes = f.read(float_length)
return __decode_float(float_bytes)
elif type_byte & 0xFF == 0x33: # Date 0011 0011
date_bytes = f.read(8)
date_value = __decode_float(date_bytes)
return datetime.datetime(2001,1,1) + datetime.timedelta(seconds = date_value)
elif type_byte & 0xF0 == 0x40: # Data 0100 nnnn
if type_byte & 0x0F != 0x0F:
# length in 4 lsb
data_length = type_byte & 0x0F
else:
# A little hack to keep the script portable between py2.x and py3k
if sys.version_info[0] < 3:
int_type_byte = ord(f.read(1)[0])
else:
int_type_byte = f.read(1)[0]
if int_type_byte & 0xF0 != 0x10:
raise BplistError("Long Data field definition not followed by int type at offset {0}".format(f.tell()))
int_length = 2 ** (int_type_byte & 0x0F)
int_bytes = f.read(int_length)
data_length = __decode_multibyte_int(int_bytes, False)
return f.read(data_length)
elif type_byte & 0xF0 == 0x50: # ASCII 0101 nnnn
if type_byte & 0x0F != 0x0F:
# length in 4 lsb
ascii_length = type_byte & 0x0F
else:
# A little hack to keep the script portable between py2.x and py3k
if sys.version_info[0] < 3:
int_type_byte = ord(f.read(1)[0])
else:
int_type_byte = f.read(1)[0]
if int_type_byte & 0xF0 != 0x10:
raise BplistError("Long ASCII field definition not followed by int type at offset {0}".format(f.tell()))
int_length = 2 ** (int_type_byte & 0x0F)
int_bytes = f.read(int_length)
ascii_length = __decode_multibyte_int(int_bytes, False)
return f.read(ascii_length).decode("ascii")
elif type_byte & 0xF0 == 0x60: # UTF-16 0110 nnnn
if type_byte & 0x0F != 0x0F:
# length in 4 lsb
utf16_length = (type_byte & 0x0F) * 2 # Length is characters - 16bit width
else:
# A little hack to keep the script portable between py2.x and py3k
if sys.version_info[0] < 3:
int_type_byte = ord(f.read(1)[0])
else:
int_type_byte = f.read(1)[0]
if int_type_byte & 0xF0 != 0x10:
raise BplistError("Long UTF-16 field definition not followed by int type at offset {0}".format(f.tell()))
int_length = 2 ** (int_type_byte & 0x0F)
int_bytes = f.read(int_length)
utf16_length = __decode_multibyte_int(int_bytes, False) * 2
return f.read(utf16_length).decode("utf_16_be")
elif type_byte & 0xF0 == 0x80: # UID 1000 nnnn
uid_length = (type_byte & 0x0F) + 1
uid_bytes = f.read(uid_length)
return BplistUID(__decode_multibyte_int(uid_bytes, signed=False))
elif type_byte & 0xF0 == 0xA0: # Array 1010 nnnn
if type_byte & 0x0F != 0x0F:
# length in 4 lsb
array_count = type_byte & 0x0F
else:
# A little hack to keep the script portable between py2.x and py3k
if sys.version_info[0] < 3:
int_type_byte = ord(f.read(1)[0])
else:
int_type_byte = f.read(1)[0]
if int_type_byte & 0xF0 != 0x10:
raise BplistError("Long Array field definition not followed by int type at offset {0}".format(f.tell()))
int_length = 2 ** (int_type_byte & 0x0F)
int_bytes = f.read(int_length)
array_count = __decode_multibyte_int(int_bytes, signed=False)
array_refs = []
for i in range(array_count):
array_refs.append(__decode_multibyte_int(f.read(collection_offset_size), False))
return [__decode_object(f, offset_table[obj_ref], collection_offset_size, offset_table) for obj_ref in array_refs]
    elif type_byte & 0xF0 == 0xC0: # Set 1100 nnnn
if type_byte & 0x0F != 0x0F:
# length in 4 lsb
set_count = type_byte & 0x0F
else:
# A little hack to keep the script portable between py2.x and py3k
if sys.version_info[0] < 3:
int_type_byte = ord(f.read(1)[0])
else:
int_type_byte = f.read(1)[0]
if int_type_byte & 0xF0 != 0x10:
raise BplistError("Long Set field definition not followed by int type at offset {0}".format(f.tell()))
int_length = 2 ** (int_type_byte & 0x0F)
int_bytes = f.read(int_length)
set_count = __decode_multibyte_int(int_bytes, signed=False)
set_refs = []
for i in range(set_count):
set_refs.append(__decode_multibyte_int(f.read(collection_offset_size), False))
return [__decode_object(f, offset_table[obj_ref], collection_offset_size, offset_table) for obj_ref in set_refs]
    elif type_byte & 0xF0 == 0xD0: # Dict 1101 nnnn
if type_byte & 0x0F != 0x0F:
# length in 4 lsb
dict_count = type_byte & 0x0F
else:
# A little hack to keep the script portable between py2.x and py3k
if sys.version_info[0] < 3:
int_type_byte = ord(f.read(1)[0])
else:
int_type_byte = f.read(1)[0]
#print("Dictionary length int byte: {0}".format(hex(int_type_byte)))
if int_type_byte & 0xF0 != 0x10:
raise BplistError("Long Dict field definition not followed by int type at offset {0}".format(f.tell()))
int_length = 2 ** (int_type_byte & 0x0F)
int_bytes = f.read(int_length)
dict_count = __decode_multibyte_int(int_bytes, signed=False)
key_refs = []
#print("Dictionary count: {0}".format(dict_count))
for i in range(dict_count):
key_refs.append(__decode_multibyte_int(f.read(collection_offset_size), False))
value_refs = []
for i in range(dict_count):
value_refs.append(__decode_multibyte_int(f.read(collection_offset_size), False))
dict_result = {}
for i in range(dict_count):
#print("Key ref: {0}\tVal ref: {1}".format(key_refs[i], value_refs[i]))
key = __decode_object(f, offset_table[key_refs[i]], collection_offset_size, offset_table)
val = __decode_object(f, offset_table[value_refs[i]], collection_offset_size, offset_table)
dict_result[key] = val
return dict_result
def load(f):
"""
Reads and converts a file-like object containing a binary property list.
Takes a file-like object (must support reading and seeking) as an argument
Returns a data structure representing the data in the property list
"""
# Check magic number
if f.read(8) != b"bplist00":
raise BplistError("Bad file header")
# Read trailer
f.seek(-32, os.SEEK_END)
trailer = f.read(32)
    offset_int_size, collection_offset_size, object_count, top_level_object_index, offset_table_offset = struct.unpack(">6xbbQQQ", trailer)
    # Read offset table
    f.seek(offset_table_offset)
offset_table = []
for i in range(object_count):
offset_table.append(__decode_multibyte_int(f.read(offset_int_size), False))
return __decode_object(f, offset_table[top_level_object_index], collection_offset_size, offset_table)
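# Illustrative sketch, not part of the original module: a thin wrapper showing how
# load() is typically called with a file opened in binary mode. The helper name and
# the path argument are placeholders chosen for this example.
def _example_load_bplist(path):
    """Open the binary plist at ``path`` and return the decoded top-level object."""
    with open(path, "rb") as f:
        return load(f)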
def NSKeyedArchiver_convert(o, object_table):
if isinstance(o, list):
return NsKeyedArchiverList(o, object_table)
elif isinstance(o, dict):
return NsKeyedArchiverDictionary(o, object_table)
elif isinstance(o, BplistUID):
return NSKeyedArchiver_convert(object_table[o.value], object_table)
else:
return o
class NsKeyedArchiverDictionary(dict):
def __init__(self, original_dict, object_table):
super(NsKeyedArchiverDictionary, self).__init__(original_dict)
self.object_table = object_table
def __getitem__(self, index):
o = super(NsKeyedArchiverDictionary, self).__getitem__(index)
return NSKeyedArchiver_convert(o, self.object_table)
class NsKeyedArchiverList(list):
def __init__(self, original_iterable, object_table):
super(NsKeyedArchiverList, self).__init__(original_iterable)
self.object_table = object_table
def __getitem__(self, index):
o = super(NsKeyedArchiverList, self).__getitem__(index)
return NSKeyedArchiver_convert(o, self.object_table)
def __iter__(self):
for o in super(NsKeyedArchiverList, self).__iter__():
yield NSKeyedArchiver_convert(o, self.object_table)
def deserialise_NsKeyedArchiver(obj):
"""Deserialises an NSKeyedArchiver bplist rebuilding the structure.
obj should usually be the top-level object returned by the load()
function."""
# Check that this is an archiver and version we understand
if not isinstance(obj, dict):
raise TypeError("obj must be a dict")
if "$archiver" not in obj or obj["$archiver"] != "NSKeyedArchiver":
raise ValueError("obj does not contain an '$archiver' key or the '$archiver' is unrecognised")
if "$version" not in obj or obj["$version"] != 100000:
raise ValueError("obj does not contain a '$version' key or the '$version' is unrecognised")
object_table = obj["$objects"]
if "root" in obj["$top"]:
return NSKeyedArchiver_convert(obj["$top"]["root"], object_table)
else:
return NSKeyedArchiver_convert(obj["$top"], object_table)
# NSMutableDictionary convenience functions
def is_nsmutabledictionary(obj):
if not isinstance(obj, dict):
#print("not dict")
return False
if "$class" not in obj.keys():
#print("no class")
return False
if obj["$class"].get("$classname") != "NSMutableDictionary":
#print("wrong class")
return False
if "NS.keys" not in obj.keys():
#print("no keys")
return False
if "NS.objects" not in obj.keys():
#print("no objects")
return False
return True
def convert_NSMutableDictionary(obj):
"""Converts a NSKeyedArchiver serialised NSMutableDictionary into
a straight dictionary (rather than two lists as it is serialised
as)"""
# The dictionary is serialised as two lists (one for keys and one
# for values) which obviously removes all convenience afforded by
# dictionaries. This function converts this structure to an
# actual dictionary so that values can be accessed by key.
if not is_nsmutabledictionary(obj):
raise ValueError("obj does not have the correct structure for a NSMutableDictionary serialised to a NSKeyedArchiver")
keys = obj["NS.keys"]
vals = obj["NS.objects"]
# sense check the keys and values:
if not isinstance(keys, list):
raise TypeError("The 'NS.keys' value is an unexpected type (expected list; actual: {0}".format(type(keys)))
if not isinstance(vals, list):
raise TypeError("The 'NS.objects' value is an unexpected type (expected list; actual: {0}".format(type(vals)))
if len(keys) != len(vals):
raise ValueError("The length of the 'NS.keys' list ({0}) is not equal to that of the 'NS.objects ({1})".format(len(keys), len(vals)))
result = {}
for i,k in enumerate(keys):
if "k" in result:
raise ValueError("The 'NS.keys' list contains duplicate entries")
result[k] = vals[i]
return result
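# Illustrative sketch, not part of the original module: the parallel-list layout
# that is_nsmutabledictionary()/convert_NSMutableDictionary() expect, built from
# made-up keys and values. Wrapped in a function so nothing executes at import time.
def _example_convert_nsmutabledictionary():
    serialised = {
        "$class": {"$classname": "NSMutableDictionary"},
        "NS.keys": ["first", "second"],
        "NS.objects": [1, 2],
    }
    # The two parallel lists are folded back into a normal dictionary.
    return convert_NSMutableDictionary(serialised)  # {'first': 1, 'second': 2}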
|
Wonfee/pymobiledevice
|
util/ccl_bplist.py
|
Python
|
gpl-3.0
| 15,606
|
# coding=utf-8
from ._commandbase import RadianceCommand
from ..datatype import RadiancePath
import os
class Epw2wea(RadianceCommand):
"""epw2wea transforms an EnergyPlus weather data (.epw) file into
the DAYSIM weather file format, for use with the RADIANCE gendaymtx
program.
Attributes:
epw_file: Filepath of the epw file that is to be converted into wea
format.
Usage:
        from honeybee_plus.radiance.command.epw2wea import Epw2wea
        # create an epw2wea command.
        epwWea = Epw2wea(epw_file='c:/ladybug/test.epw')
"""
_epw_file = RadiancePath('_epw_file',
descriptive_name='Epw weather data file',
relative_path=None, check_exists=False)
output_wea_file = RadiancePath('output_wea_file',
descriptive_name='Output wea file',
relative_path=None, check_exists=False)
def __init__(self, epw_file=None, output_wea_file=None):
RadianceCommand.__init__(self)
self.epw_file = epw_file
"""The path of the epw file that is to be converted to a wea file."""
self.output_wea_file = output_wea_file
"""The path of the output wea file. Note that this path will be created
if not specified by the user."""
@property
def epw_file(self):
return self._epw_file
@epw_file.setter
def epw_file(self, value):
"""The path of the epw file that is to be converted to a wea file."""
if value:
self._epw_file = value
if not self.output_wea_file._value:
self.output_wea_file = os.path.splitext(value)[0] + '.wea'
else:
self._epw_file = None
def to_rad_string(self, relative_path=False):
"""Return full radiance command as string"""
rad_string = "%s %s %s" % (
'"%s"' % os.path.join(self.radbin_path, 'epw2wea'),
self.epw_file.to_rad_string(),
self.output_wea_file.to_rad_string())
# self.check_input_files(rad_string)
return rad_string
@property
def input_files(self):
"""Return input files specified by user."""
return self.epw_file.normpath,
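# Illustrative sketch, not part of the original module: building the epw2wea command
# string for an epw file, assuming a configured Radiance installation. The helper
# name and the epw path argument are placeholders.
def _example_epw2wea_command(epw_path):
    """Return the full epw2wea command line that would convert ``epw_path``."""
    converter = Epw2wea(epw_file=epw_path)
    return converter.to_rad_string()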
|
ladybug-analysis-tools/honeybee
|
honeybee_plus/radiance/command/epw2wea.py
|
Python
|
gpl-3.0
| 2,274
|
# -*- test-case-name: twisted.conch.test.test_telnet -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.telnet}.
"""
from __future__ import absolute_import, division
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.internet import defer
from twisted.conch import telnet
from twisted.trial import unittest
from twisted.test import proto_helpers
from twisted.python.compat import iterbytes
@implementer(telnet.ITelnetProtocol)
class TestProtocol:
localEnableable = ()
remoteEnableable = ()
def __init__(self):
self.bytes = b''
self.subcmd = []
self.calls = []
self.enabledLocal = []
self.enabledRemote = []
self.disabledLocal = []
self.disabledRemote = []
def makeConnection(self, transport):
d = transport.negotiationMap = {}
d[b'\x12'] = self.neg_TEST_COMMAND
d = transport.commandMap = transport.commandMap.copy()
for cmd in ('NOP', 'DM', 'BRK', 'IP', 'AO', 'AYT', 'EC', 'EL', 'GA'):
d[getattr(telnet, cmd)] = lambda arg, cmd=cmd: self.calls.append(cmd)
def dataReceived(self, bytes):
self.bytes += bytes
def connectionLost(self, reason):
pass
def neg_TEST_COMMAND(self, payload):
self.subcmd = payload
def enableLocal(self, option):
if option in self.localEnableable:
self.enabledLocal.append(option)
return True
return False
def disableLocal(self, option):
self.disabledLocal.append(option)
def enableRemote(self, option):
if option in self.remoteEnableable:
self.enabledRemote.append(option)
return True
return False
def disableRemote(self, option):
self.disabledRemote.append(option)
class InterfacesTests(unittest.TestCase):
def test_interface(self):
"""
L{telnet.TelnetProtocol} implements L{telnet.ITelnetProtocol}
"""
p = telnet.TelnetProtocol()
verifyObject(telnet.ITelnetProtocol, p)
class TelnetTransportTests(unittest.TestCase):
"""
Tests for L{telnet.TelnetTransport}.
"""
def setUp(self):
self.p = telnet.TelnetTransport(TestProtocol)
self.t = proto_helpers.StringTransport()
self.p.makeConnection(self.t)
def testRegularBytes(self):
# Just send a bunch of bytes. None of these do anything
# with telnet. They should pass right through to the
# application layer.
h = self.p.protocol
L = [b"here are some bytes la la la",
b"some more arrive here",
b"lots of bytes to play with",
b"la la la",
b"ta de da",
b"dum"]
for b in L:
self.p.dataReceived(b)
self.assertEqual(h.bytes, b''.join(L))
def testNewlineHandling(self):
# Send various kinds of newlines and make sure they get translated
# into \n.
h = self.p.protocol
L = [b"here is the first line\r\n",
b"here is the second line\r\0",
b"here is the third line\r\n",
b"here is the last line\r\0"]
for b in L:
self.p.dataReceived(b)
self.assertEqual(h.bytes, L[0][:-2] + b'\n' +
L[1][:-2] + b'\r' +
L[2][:-2] + b'\n' +
L[3][:-2] + b'\r')
def testIACEscape(self):
# Send a bunch of bytes and a couple quoted \xFFs. Unquoted,
# \xFF is a telnet command. Quoted, one of them from each pair
# should be passed through to the application layer.
h = self.p.protocol
L = [b"here are some bytes\xff\xff with an embedded IAC",
b"and here is a test of a border escape\xff",
b"\xff did you get that IAC?"]
for b in L:
self.p.dataReceived(b)
self.assertEqual(h.bytes, b''.join(L).replace(b'\xff\xff', b'\xff'))
def _simpleCommandTest(self, cmdName):
# Send a single simple telnet command and make sure
# it gets noticed and the appropriate method gets
# called.
h = self.p.protocol
cmd = telnet.IAC + getattr(telnet, cmdName)
L = [b"Here's some bytes, tra la la",
b"But ono!" + cmd + b" an interrupt"]
for b in L:
self.p.dataReceived(b)
self.assertEqual(h.calls, [cmdName])
self.assertEqual(h.bytes, b''.join(L).replace(cmd, b''))
def testInterrupt(self):
self._simpleCommandTest("IP")
def testNoOperation(self):
self._simpleCommandTest("NOP")
def testDataMark(self):
self._simpleCommandTest("DM")
def testBreak(self):
self._simpleCommandTest("BRK")
def testAbortOutput(self):
self._simpleCommandTest("AO")
def testAreYouThere(self):
self._simpleCommandTest("AYT")
def testEraseCharacter(self):
self._simpleCommandTest("EC")
def testEraseLine(self):
self._simpleCommandTest("EL")
def testGoAhead(self):
self._simpleCommandTest("GA")
def testSubnegotiation(self):
# Send a subnegotiation command and make sure it gets
# parsed and that the correct method is called.
h = self.p.protocol
cmd = telnet.IAC + telnet.SB + b'\x12hello world' + telnet.IAC + telnet.SE
L = [b"These are some bytes but soon" + cmd,
b"there will be some more"]
for b in L:
self.p.dataReceived(b)
self.assertEqual(h.bytes, b''.join(L).replace(cmd, b''))
self.assertEqual(h.subcmd, list(iterbytes(b"hello world")))
def testSubnegotiationWithEmbeddedSE(self):
# Send a subnegotiation command with an embedded SE. Make sure
# that SE gets passed to the correct method.
h = self.p.protocol
cmd = (telnet.IAC + telnet.SB +
b'\x12' + telnet.SE +
telnet.IAC + telnet.SE)
L = [b"Some bytes are here" + cmd + b"and here",
b"and here"]
for b in L:
self.p.dataReceived(b)
self.assertEqual(h.bytes, b''.join(L).replace(cmd, b''))
self.assertEqual(h.subcmd, [telnet.SE])
def testBoundarySubnegotiation(self):
# Send a subnegotiation command. Split it at every possible byte boundary
# and make sure it always gets parsed and that it is passed to the correct
# method.
cmd = (telnet.IAC + telnet.SB +
b'\x12' + telnet.SE + b'hello' +
telnet.IAC + telnet.SE)
for i in range(len(cmd)):
h = self.p.protocol = TestProtocol()
h.makeConnection(self.p)
a, b = cmd[:i], cmd[i:]
L = [b"first part" + a,
b + b"last part"]
for bytes in L:
self.p.dataReceived(bytes)
self.assertEqual(h.bytes, b''.join(L).replace(cmd, b''))
self.assertEqual(h.subcmd, [telnet.SE] + list(iterbytes(b'hello')))
def _enabledHelper(self, o, eL=[], eR=[], dL=[], dR=[]):
self.assertEqual(o.enabledLocal, eL)
self.assertEqual(o.enabledRemote, eR)
self.assertEqual(o.disabledLocal, dL)
self.assertEqual(o.disabledRemote, dR)
def testRefuseWill(self):
# Try to enable an option. The server should refuse to enable it.
cmd = telnet.IAC + telnet.WILL + b'\x12'
bytes = b"surrounding bytes" + cmd + b"to spice things up"
self.p.dataReceived(bytes)
self.assertEqual(self.p.protocol.bytes, bytes.replace(cmd, b''))
self.assertEqual(self.t.value(), telnet.IAC + telnet.DONT + b'\x12')
self._enabledHelper(self.p.protocol)
def testRefuseDo(self):
# Try to enable an option. The server should refuse to enable it.
cmd = telnet.IAC + telnet.DO + b'\x12'
bytes = b"surrounding bytes" + cmd + b"to spice things up"
self.p.dataReceived(bytes)
self.assertEqual(self.p.protocol.bytes, bytes.replace(cmd, b''))
self.assertEqual(self.t.value(), telnet.IAC + telnet.WONT + b'\x12')
self._enabledHelper(self.p.protocol)
def testAcceptDo(self):
# Try to enable an option. The option is in our allowEnable
# list, so we will allow it to be enabled.
cmd = telnet.IAC + telnet.DO + b'\x19'
bytes = b'padding' + cmd + b'trailer'
h = self.p.protocol
h.localEnableable = (b'\x19',)
self.p.dataReceived(bytes)
self.assertEqual(self.t.value(), telnet.IAC + telnet.WILL + b'\x19')
self._enabledHelper(h, eL=[b'\x19'])
def testAcceptWill(self):
# Same as testAcceptDo, but reversed.
cmd = telnet.IAC + telnet.WILL + b'\x91'
bytes = b'header' + cmd + b'padding'
h = self.p.protocol
h.remoteEnableable = (b'\x91',)
self.p.dataReceived(bytes)
self.assertEqual(self.t.value(), telnet.IAC + telnet.DO + b'\x91')
self._enabledHelper(h, eR=[b'\x91'])
def testAcceptWont(self):
# Try to disable an option. The server must allow any option to
# be disabled at any time. Make sure it disables it and sends
# back an acknowledgement of this.
cmd = telnet.IAC + telnet.WONT + b'\x29'
# Jimmy it - after these two lines, the server will be in a state
# such that it believes the option to have been previously enabled
# via normal negotiation.
s = self.p.getOptionState(b'\x29')
s.him.state = 'yes'
bytes = b"fiddle dee" + cmd
self.p.dataReceived(bytes)
self.assertEqual(self.p.protocol.bytes, bytes.replace(cmd, b''))
self.assertEqual(self.t.value(), telnet.IAC + telnet.DONT + b'\x29')
self.assertEqual(s.him.state, 'no')
self._enabledHelper(self.p.protocol, dR=[b'\x29'])
def testAcceptDont(self):
# Try to disable an option. The server must allow any option to
# be disabled at any time. Make sure it disables it and sends
# back an acknowledgement of this.
cmd = telnet.IAC + telnet.DONT + b'\x29'
# Jimmy it - after these two lines, the server will be in a state
        # such that it believes the option to have been previously enabled
# via normal negotiation.
s = self.p.getOptionState(b'\x29')
s.us.state = 'yes'
bytes = b"fiddle dum " + cmd
self.p.dataReceived(bytes)
self.assertEqual(self.p.protocol.bytes, bytes.replace(cmd, b''))
self.assertEqual(self.t.value(), telnet.IAC + telnet.WONT + b'\x29')
self.assertEqual(s.us.state, 'no')
self._enabledHelper(self.p.protocol, dL=[b'\x29'])
def testIgnoreWont(self):
# Try to disable an option. The option is already disabled. The
# server should send nothing in response to this.
cmd = telnet.IAC + telnet.WONT + b'\x47'
bytes = b"dum de dum" + cmd + b"tra la la"
self.p.dataReceived(bytes)
self.assertEqual(self.p.protocol.bytes, bytes.replace(cmd, b''))
self.assertEqual(self.t.value(), b'')
self._enabledHelper(self.p.protocol)
def testIgnoreDont(self):
# Try to disable an option. The option is already disabled. The
# server should send nothing in response to this. Doing so could
# lead to a negotiation loop.
cmd = telnet.IAC + telnet.DONT + b'\x47'
bytes = b"dum de dum" + cmd + b"tra la la"
self.p.dataReceived(bytes)
self.assertEqual(self.p.protocol.bytes, bytes.replace(cmd, b''))
self.assertEqual(self.t.value(), b'')
self._enabledHelper(self.p.protocol)
def testIgnoreWill(self):
# Try to enable an option. The option is already enabled. The
# server should send nothing in response to this. Doing so could
# lead to a negotiation loop.
cmd = telnet.IAC + telnet.WILL + b'\x56'
# Jimmy it - after these two lines, the server will be in a state
# such that it believes the option to have been previously enabled
# via normal negotiation.
s = self.p.getOptionState(b'\x56')
s.him.state = 'yes'
bytes = b"tra la la" + cmd + b"dum de dum"
self.p.dataReceived(bytes)
self.assertEqual(self.p.protocol.bytes, bytes.replace(cmd, b''))
self.assertEqual(self.t.value(), b'')
self._enabledHelper(self.p.protocol)
def testIgnoreDo(self):
# Try to enable an option. The option is already enabled. The
# server should send nothing in response to this. Doing so could
# lead to a negotiation loop.
cmd = telnet.IAC + telnet.DO + b'\x56'
# Jimmy it - after these two lines, the server will be in a state
# such that it believes the option to have been previously enabled
# via normal negotiation.
s = self.p.getOptionState(b'\x56')
s.us.state = 'yes'
bytes = b"tra la la" + cmd + b"dum de dum"
self.p.dataReceived(bytes)
self.assertEqual(self.p.protocol.bytes, bytes.replace(cmd, b''))
self.assertEqual(self.t.value(), b'')
self._enabledHelper(self.p.protocol)
def testAcceptedEnableRequest(self):
# Try to enable an option through the user-level API. This
# returns a Deferred that fires when negotiation about the option
# finishes. Make sure it fires, make sure state gets updated
# properly, make sure the result indicates the option was enabled.
d = self.p.do(b'\x42')
h = self.p.protocol
h.remoteEnableable = (b'\x42',)
self.assertEqual(self.t.value(), telnet.IAC + telnet.DO + b'\x42')
self.p.dataReceived(telnet.IAC + telnet.WILL + b'\x42')
d.addCallback(self.assertEqual, True)
d.addCallback(lambda _: self._enabledHelper(h, eR=[b'\x42']))
return d
def test_refusedEnableRequest(self):
"""
If the peer refuses to enable an option we request it to enable, the
L{Deferred} returned by L{TelnetProtocol.do} fires with an
L{OptionRefused} L{Failure}.
"""
# Try to enable an option through the user-level API. This returns a
# Deferred that fires when negotiation about the option finishes. Make
# sure it fires, make sure state gets updated properly, make sure the
# result indicates the option was enabled.
self.p.protocol.remoteEnableable = (b'\x42',)
d = self.p.do(b'\x42')
self.assertEqual(self.t.value(), telnet.IAC + telnet.DO + b'\x42')
s = self.p.getOptionState(b'\x42')
self.assertEqual(s.him.state, 'no')
self.assertEqual(s.us.state, 'no')
self.assertEqual(s.him.negotiating, True)
self.assertEqual(s.us.negotiating, False)
self.p.dataReceived(telnet.IAC + telnet.WONT + b'\x42')
d = self.assertFailure(d, telnet.OptionRefused)
d.addCallback(lambda ignored: self._enabledHelper(self.p.protocol))
d.addCallback(
lambda ignored: self.assertEqual(s.him.negotiating, False))
return d
def test_refusedEnableOffer(self):
"""
If the peer refuses to allow us to enable an option, the L{Deferred}
returned by L{TelnetProtocol.will} fires with an L{OptionRefused}
L{Failure}.
"""
# Try to offer an option through the user-level API. This returns a
# Deferred that fires when negotiation about the option finishes. Make
# sure it fires, make sure state gets updated properly, make sure the
# result indicates the option was enabled.
self.p.protocol.localEnableable = (b'\x42',)
d = self.p.will(b'\x42')
self.assertEqual(self.t.value(), telnet.IAC + telnet.WILL + b'\x42')
s = self.p.getOptionState(b'\x42')
self.assertEqual(s.him.state, 'no')
self.assertEqual(s.us.state, 'no')
self.assertEqual(s.him.negotiating, False)
self.assertEqual(s.us.negotiating, True)
self.p.dataReceived(telnet.IAC + telnet.DONT + b'\x42')
d = self.assertFailure(d, telnet.OptionRefused)
d.addCallback(lambda ignored: self._enabledHelper(self.p.protocol))
d.addCallback(
lambda ignored: self.assertEqual(s.us.negotiating, False))
return d
def testAcceptedDisableRequest(self):
# Try to disable an option through the user-level API. This
# returns a Deferred that fires when negotiation about the option
# finishes. Make sure it fires, make sure state gets updated
# properly, make sure the result indicates the option was enabled.
s = self.p.getOptionState(b'\x42')
s.him.state = 'yes'
d = self.p.dont(b'\x42')
self.assertEqual(self.t.value(), telnet.IAC + telnet.DONT + b'\x42')
self.p.dataReceived(telnet.IAC + telnet.WONT + b'\x42')
d.addCallback(self.assertEqual, True)
d.addCallback(lambda _: self._enabledHelper(self.p.protocol,
dR=[b'\x42']))
return d
def testNegotiationBlocksFurtherNegotiation(self):
# Try to disable an option, then immediately try to enable it, then
# immediately try to disable it. Ensure that the 2nd and 3rd calls
# fail quickly with the right exception.
s = self.p.getOptionState(b'\x24')
s.him.state = 'yes'
self.p.dont(b'\x24') # fires after the first line of _final
def _do(x):
d = self.p.do(b'\x24')
return self.assertFailure(d, telnet.AlreadyNegotiating)
def _dont(x):
d = self.p.dont(b'\x24')
return self.assertFailure(d, telnet.AlreadyNegotiating)
def _final(x):
self.p.dataReceived(telnet.IAC + telnet.WONT + b'\x24')
# an assertion that only passes if d2 has fired
self._enabledHelper(self.p.protocol, dR=[b'\x24'])
# Make sure we allow this
self.p.protocol.remoteEnableable = (b'\x24',)
d = self.p.do(b'\x24')
self.p.dataReceived(telnet.IAC + telnet.WILL + b'\x24')
d.addCallback(self.assertEqual, True)
d.addCallback(lambda _: self._enabledHelper(self.p.protocol,
eR=[b'\x24'],
dR=[b'\x24']))
return d
d = _do(None)
d.addCallback(_dont)
d.addCallback(_final)
return d
def testSuperfluousDisableRequestRaises(self):
# Try to disable a disabled option. Make sure it fails properly.
d = self.p.dont(b'\xab')
return self.assertFailure(d, telnet.AlreadyDisabled)
def testSuperfluousEnableRequestRaises(self):
# Try to disable a disabled option. Make sure it fails properly.
s = self.p.getOptionState(b'\xab')
s.him.state = 'yes'
d = self.p.do(b'\xab')
return self.assertFailure(d, telnet.AlreadyEnabled)
def testLostConnectionFailsDeferreds(self):
d1 = self.p.do(b'\x12')
d2 = self.p.do(b'\x23')
d3 = self.p.do(b'\x34')
class TestException(Exception):
pass
self.p.connectionLost(TestException("Total failure!"))
d1 = self.assertFailure(d1, TestException)
d2 = self.assertFailure(d2, TestException)
d3 = self.assertFailure(d3, TestException)
return defer.gatherResults([d1, d2, d3])
class TestTelnet(telnet.Telnet):
"""
A trivial extension of the telnet protocol class useful to unit tests.
"""
def __init__(self):
telnet.Telnet.__init__(self)
self.events = []
def applicationDataReceived(self, bytes):
"""
Record the given data in C{self.events}.
"""
self.events.append(('bytes', bytes))
def unhandledCommand(self, command, bytes):
"""
Record the given command in C{self.events}.
"""
self.events.append(('command', command, bytes))
def unhandledSubnegotiation(self, command, bytes):
"""
Record the given subnegotiation command in C{self.events}.
"""
self.events.append(('negotiate', command, bytes))
class TelnetTests(unittest.TestCase):
"""
Tests for L{telnet.Telnet}.
L{telnet.Telnet} implements the TELNET protocol (RFC 854), including option
and suboption negotiation, and option state tracking.
"""
def setUp(self):
"""
Create an unconnected L{telnet.Telnet} to be used by tests.
"""
self.protocol = TestTelnet()
def test_enableLocal(self):
"""
L{telnet.Telnet.enableLocal} should reject all options, since
L{telnet.Telnet} does not know how to implement any options.
"""
self.assertFalse(self.protocol.enableLocal(b'\0'))
def test_enableRemote(self):
"""
L{telnet.Telnet.enableRemote} should reject all options, since
L{telnet.Telnet} does not know how to implement any options.
"""
self.assertFalse(self.protocol.enableRemote(b'\0'))
def test_disableLocal(self):
"""
It is an error for L{telnet.Telnet.disableLocal} to be called, since
L{telnet.Telnet.enableLocal} will never allow any options to be enabled
locally. If a subclass overrides enableLocal, it must also override
disableLocal.
"""
self.assertRaises(NotImplementedError, self.protocol.disableLocal, b'\0')
def test_disableRemote(self):
"""
It is an error for L{telnet.Telnet.disableRemote} to be called, since
L{telnet.Telnet.enableRemote} will never allow any options to be
enabled remotely. If a subclass overrides enableRemote, it must also
override disableRemote.
"""
self.assertRaises(NotImplementedError, self.protocol.disableRemote, b'\0')
def test_requestNegotiation(self):
"""
L{telnet.Telnet.requestNegotiation} formats the feature byte and the
payload bytes into the subnegotiation format and sends them.
See RFC 855.
"""
transport = proto_helpers.StringTransport()
self.protocol.makeConnection(transport)
self.protocol.requestNegotiation(b'\x01', b'\x02\x03')
self.assertEqual(
transport.value(),
# IAC SB feature bytes IAC SE
b'\xff\xfa\x01\x02\x03\xff\xf0')
def test_requestNegotiationEscapesIAC(self):
"""
If the payload for a subnegotiation includes I{IAC}, it is escaped by
L{telnet.Telnet.requestNegotiation} with another I{IAC}.
See RFC 855.
"""
transport = proto_helpers.StringTransport()
self.protocol.makeConnection(transport)
self.protocol.requestNegotiation(b'\x01', b'\xff')
self.assertEqual(
transport.value(),
b'\xff\xfa\x01\xff\xff\xff\xf0')
def _deliver(self, bytes, *expected):
"""
Pass the given bytes to the protocol's C{dataReceived} method and
assert that the given events occur.
"""
received = self.protocol.events = []
self.protocol.dataReceived(bytes)
self.assertEqual(received, list(expected))
def test_oneApplicationDataByte(self):
"""
One application-data byte in the default state gets delivered right
away.
"""
self._deliver(b'a', ('bytes', b'a'))
def test_twoApplicationDataBytes(self):
"""
Two application-data bytes in the default state get delivered
together.
"""
self._deliver(b'bc', ('bytes', b'bc'))
def test_threeApplicationDataBytes(self):
"""
Three application-data bytes followed by a control byte get
delivered, but the control byte doesn't.
"""
self._deliver(b'def' + telnet.IAC, ('bytes', b'def'))
def test_escapedControl(self):
"""
IAC in the escaped state gets delivered and so does another
application-data byte following it.
"""
self._deliver(telnet.IAC)
self._deliver(telnet.IAC + b'g', ('bytes', telnet.IAC + b'g'))
def test_carriageReturn(self):
"""
A carriage return only puts the protocol into the newline state. A
linefeed in the newline state causes just the newline to be
delivered. A nul in the newline state causes a carriage return to
be delivered. An IAC in the newline state causes a carriage return
to be delivered and puts the protocol into the escaped state.
Anything else causes a carriage return and that thing to be
delivered.
"""
self._deliver(b'\r')
self._deliver(b'\n', ('bytes', b'\n'))
self._deliver(b'\r\n', ('bytes', b'\n'))
self._deliver(b'\r')
self._deliver(b'\0', ('bytes', b'\r'))
self._deliver(b'\r\0', ('bytes', b'\r'))
self._deliver(b'\r')
self._deliver(b'a', ('bytes', b'\ra'))
self._deliver(b'\ra', ('bytes', b'\ra'))
self._deliver(b'\r')
self._deliver(
telnet.IAC + telnet.IAC + b'x', ('bytes', b'\r' + telnet.IAC + b'x'))
def test_applicationDataBeforeSimpleCommand(self):
"""
Application bytes received before a command are delivered before the
command is processed.
"""
self._deliver(
b'x' + telnet.IAC + telnet.NOP,
('bytes', b'x'), ('command', telnet.NOP, None))
def test_applicationDataBeforeCommand(self):
"""
Application bytes received before a WILL/WONT/DO/DONT are delivered
before the command is processed.
"""
self.protocol.commandMap = {}
self._deliver(
b'y' + telnet.IAC + telnet.WILL + b'\x00',
('bytes', b'y'), ('command', telnet.WILL, b'\x00'))
def test_applicationDataBeforeSubnegotiation(self):
"""
Application bytes received before a subnegotiation command are
delivered before the negotiation is processed.
"""
self._deliver(
b'z' + telnet.IAC + telnet.SB + b'Qx' + telnet.IAC + telnet.SE,
('bytes', b'z'), ('negotiate', b'Q', [b'x']))
|
Architektor/PySnip
|
venv/lib/python2.7/site-packages/twisted/conch/test/test_telnet.py
|
Python
|
gpl-3.0
| 26,581
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
import superdesk
import superdesk.publish
from datetime import timedelta
from eve.utils import config, ParsedRequest
from flask import current_app as app
from superdesk import get_resource_service
from superdesk.celery_task_utils import get_lock_id
from superdesk.errors import PublishHTTPPushClientError
from superdesk.lock import lock, unlock
from superdesk.celery_app import celery
from superdesk.utc import utcnow
from superdesk.profiling import ProfileManager
from .publish_queue import QueueState
logger = logging.getLogger(__name__)
UPDATE_SCHEDULE_DEFAULT = {'seconds': 10}
PUBLISH_QUEUE = 'publish_queue'
STATE_PENDING = 'pending'
class PublishContent(superdesk.Command):
"""Runs deliveries"""
def run(self, provider_type=None):
publish.apply_async(expires=10)
@celery.task(soft_time_limit=1800)
def publish():
"""Fetch items from publish queue as per the configuration, call the transmit function."""
with ProfileManager('publish:transmit'):
lock_name = get_lock_id("Transmit", "Articles")
if not lock(lock_name, expire=1810):
logger.info('Task: {} is already running.'.format(lock_name))
return
try:
            # Query any outstanding transmit requests
items = list(get_queue_items())
if len(items) > 0:
transmit_items(items)
# Query any outstanding retry attempts
retry_items = list(get_queue_items(True))
if len(retry_items) > 0:
transmit_items(retry_items)
except Exception:
logger.exception('Task: {} failed.'.format(lock_name))
finally:
unlock(lock_name)
def get_queue_items(retries=False):
if retries:
lookup = {
'$and': [
{'state': QueueState.RETRYING.value},
{'next_retry_attempt_at': {'$lte': utcnow()}}
]
}
else:
lookup = {
'$and': [
{'state': QueueState.PENDING.value}
]
}
request = ParsedRequest()
request.max_results = app.config.get('MAX_TRANSMIT_QUERY_LIMIT', 500)
# ensure we publish in the correct sequence
request.sort = '[("_created", 1), ("subscriber_id", 1), ("published_seq_num", 1)]'
return get_resource_service(PUBLISH_QUEUE).get(req=request, lookup=lookup)
@celery.task(soft_time_limit=600)
def transmit_subscriber_items(queue_items, subscriber):
lock_name = get_lock_id('Subscriber', 'Transmit', subscriber)
publish_queue_service = get_resource_service(PUBLISH_QUEUE)
if not lock(lock_name, expire=610):
return
try:
for queue_item in queue_items:
log_msg = '_id: {_id} item_id: {item_id} state: {state} ' \
'item_version: {item_version} headline: {headline}'.format(**queue_item)
try:
# check the status of the queue item
queue_item = publish_queue_service.find_one(req=None, _id=queue_item[config.ID_FIELD])
if queue_item.get('state') not in [QueueState.PENDING.value, QueueState.RETRYING.value]:
logger.info('Transmit State is not pending/retrying for queue item: {}. It is in {}'.
format(queue_item.get(config.ID_FIELD), queue_item.get('state')))
continue
# update the status of the item to in-progress
queue_update = {'state': 'in-progress', 'transmit_started_at': utcnow()}
publish_queue_service.patch(queue_item.get(config.ID_FIELD), queue_update)
logger.info('Transmitting queue item {}'.format(log_msg))
destination = queue_item['destination']
transmitter = superdesk.publish.registered_transmitters[destination.get('delivery_type')]
transmitter.transmit(queue_item)
logger.info('Transmitted queue item {}'.format(log_msg))
except Exception as e:
logger.exception('Failed to transmit queue item {}'.format(log_msg))
max_retry_attempt = app.config.get('MAX_TRANSMIT_RETRY_ATTEMPT')
retry_attempt_delay = app.config.get('TRANSMIT_RETRY_ATTEMPT_DELAY_MINUTES')
try:
orig_item = publish_queue_service.find_one(req=None, _id=queue_item['_id'])
updates = {config.LAST_UPDATED: utcnow()}
if orig_item.get('retry_attempt', 0) < max_retry_attempt and \
not isinstance(e, PublishHTTPPushClientError):
updates['retry_attempt'] = orig_item.get('retry_attempt', 0) + 1
updates['state'] = QueueState.RETRYING.value
updates['next_retry_attempt_at'] = utcnow() + timedelta(minutes=retry_attempt_delay)
else:
# all retry attempts exhausted marking the item as failed.
updates['state'] = QueueState.FAILED.value
publish_queue_service.system_update(orig_item.get(config.ID_FIELD), updates, orig_item)
except Exception:
logger.error('Failed to set the state for failed publish queue item {}.'.format(queue_item['_id']))
finally:
unlock(lock_name)
def transmit_items(queue_items):
# get a distinct list of the subscribers that have queued items
subscribers = list(set([q['subscriber_id'] for q in queue_items]))
# extract the queued items for each subscriber and transmit them
for subscriber in subscribers:
sub_queue_items = [item for item in queue_items if item['subscriber_id'] == subscriber]
transmit_subscriber_items.apply_async(kwargs={'queue_items': sub_queue_items, 'subscriber': str(subscriber)})
superdesk.command('publish:transmit', PublishContent())
|
mugurrus/superdesk-core
|
superdesk/publish/publish_content.py
|
Python
|
agpl-3.0
| 6,229
|
from django.db import models
from helusers.models import AbstractUser
class User(AbstractUser):
pass
|
City-of-Helsinki/devheldev
|
users/models.py
|
Python
|
agpl-3.0
| 107
|
import os
from queue import Queue
from bears.general.LicenseHeaderBear import LicenseHeaderBear
from coalib.testing.LocalBearTestHelper import LocalBearTestHelper
from coalib.results.Result import Result
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
def get_testfile_path(name):
return os.path.join(os.path.dirname(__file__),
'licenseheader_test_files',
name)
def load_testfile(name):
with open(get_testfile_path(name)) as f:
output = f.readlines()
return output
class LicenseHeaderBearTest(LocalBearTestHelper):
def setUp(self):
self.section = Section('name')
self.uut = LicenseHeaderBear(self.section, Queue())
def test_copyright_without_author(self):
file_contents = load_testfile('CopyrightWithoutAuthor.java')
self.check_validity(self.uut, file_contents)
def test_copyright_with_given_author(self):
file_contents = load_testfile('copyright_with_given_author.txt')
self.section.append(Setting('author_name', 'The coala developers'))
self.check_validity(
self.uut,
file_contents)
def test_copyright_with_different_author(self):
file_contents = load_testfile('copyright_with_different_author.txt')
self.section.append(Setting('author_name', 'The coala developers'))
self.check_results(
self.uut,
file_contents,
[Result.from_values('LicenseHeaderBear',
'Copyright notice with different/no author '
'present.',
file=get_testfile_path('copyright_with_diff'
'erent_author.txt'))],
filename=get_testfile_path('copyright_with_'
'different_author.txt'))
def test_no_copyright(self):
file_contents = load_testfile('no_copyright.py')
self.check_results(
self.uut,
file_contents,
[Result.from_values('LicenseHeaderBear',
'Copyright notice not present.',
file=get_testfile_path('no_copyright.py'))],
filename=get_testfile_path('no_copyright.py'))
|
coala-analyzer/coala-bears
|
tests/general/LicenseHeaderBearTest.py
|
Python
|
agpl-3.0
| 2,350
|
# -*- coding: utf-8 -*-
#
# pi-manage documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 11 19:10:09 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pi-manage'
copyright = u'2015, privacyIDEA'
author = u'privacyIDEA'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.8'
# The full version, including alpha/beta/rc tags.
release = '2.8'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pi-managedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pi-manage.tex', u'pi-manage Documentation',
u'privacyIDEA', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pi-manage', u'pi-manage Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pi-manage', u'pi-manage Documentation',
author, 'pi-manage', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
jh23453/privacyidea
|
doc/installation/system/pimanage/conf.py
|
Python
|
agpl-3.0
| 9,178
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Issue.status'
db.add_column('core_issue', 'status',
self.gf('django.db.models.fields.CharField')(default='EMPTY', max_length=40),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Issue.status'
db.delete_column('core_issue', 'status')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'bitcoin_frespo.moneysent': {
'Meta': {'object_name': 'MoneySent'},
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'from_address': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'to_address': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'transaction_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '8'})
},
'bitcoin_frespo.receiveaddress': {
'Meta': {'object_name': 'ReceiveAddress'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'available': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.actionlog': {
'Meta': {'object_name': 'ActionLog'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'entity': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']", 'null': 'True'}),
'issue_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.IssueComment']", 'null': 'True'}),
'new_json': ('django.db.models.fields.TextField', [], {}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']", 'null': 'True'}),
'old_json': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Project']", 'null': 'True'}),
'solution': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Solution']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'core.issue': {
'Meta': {'object_name': 'Issue'},
'createdByUser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_feedback': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public_suggestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Project']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'trackerURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'trackerURL_noprotocol': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updatedDate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'core.issuecomment': {
'Meta': {'object_name': 'IssueComment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"})
},
'core.issuecommenthistevent': {
'Meta': {'object_name': 'IssueCommentHistEvent'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.IssueComment']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.issuewatch': {
'Meta': {'object_name': 'IssueWatch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.offer': {
'Meta': {'object_name': 'Offer'},
'acceptanceCriteria': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'expirationDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'no_forking': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'require_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sponsor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.offercomment': {
'Meta': {'object_name': 'OfferComment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"})
},
'core.offercommenthistevent': {
'Meta': {'object_name': 'OfferCommentHistEvent'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.OfferComment']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.offerhistevent': {
'Meta': {'object_name': 'OfferHistEvent'},
'acceptanceCriteria': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'expirationDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'no_forking': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'require_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.offerwatch': {
'Meta': {'object_name': 'OfferWatch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.payment': {
'Meta': {'object_name': 'Payment'},
'bitcoin_fee': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '8'}),
'bitcoin_receive_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bitcoin_frespo.ReceiveAddress']", 'null': 'True'}),
'bitcoin_transaction_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'confirm_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'fee': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Offer']"}),
'offer2payment_suggested_rate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '8'}),
'offer_currency': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'paykey': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'total': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '8'}),
'total_bitcoin_received': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '8'}),
'usd2payment_rate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '8'})
},
'core.paymenthistevent': {
'Meta': {'object_name': 'PaymentHistEvent'},
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Payment']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.paymentpart': {
'Meta': {'object_name': 'PaymentPart'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'money_sent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bitcoin_frespo.MoneySent']", 'null': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Payment']"}),
'paypalEmail': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '8'}),
'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'solution': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Solution']"})
},
'core.project': {
'Meta': {'object_name': 'Project'},
'createdByUser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'homeURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image3x1': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'trackerURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'core.solution': {
'Meta': {'object_name': 'Solution'},
'accepting_payments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'creationDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Issue']"}),
'lastChangeDate': ('django.db.models.fields.DateTimeField', [], {}),
'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.solutionhistevent': {
'Meta': {'object_name': 'SolutionHistEvent'},
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'eventDate': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'solution': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Solution']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'core.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'bitcoin_receive_address': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'brazilianPaypal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hide_from_userlist': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_paypal_email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_primary_email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'paypalEmail': ('django.db.models.fields.EmailField', [], {'max_length': '256'}),
'paypal_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferred_language_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'realName': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'receiveAllEmail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'receiveEmail_announcements': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'receiveEmail_issue_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'receiveEmail_issue_offer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'receiveEmail_issue_payment': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'receiveEmail_issue_work': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'screenName': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['core']
|
bankonme/www.freedomsponsors.org
|
djangoproject/core/migrations/0046_auto__add_field_issue_status.py
|
Python
|
agpl-3.0
| 20,741
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyMarkupsafe(PythonPackage):
"""MarkupSafe is a library for Python that implements a unicode
string that is aware of HTML escaping rules and can be used to
implement automatic string escaping. It is used by Jinja 2, the
Mako templating engine, the Pylons web framework and many more."""
homepage = "http://www.pocoo.org/projects/markupsafe/"
url = "https://pypi.io/packages/source/M/MarkupSafe/MarkupSafe-1.0.tar.gz"
import_modules = ['markupsafe']
version('1.0', '2fcedc9284d50e577b5192e8e3578355')
version('0.23', 'f5ab3deee4c37cd6a922fb81e730da6e')
version('0.22', 'cb3ec29fd5361add24cfd0c6e2953b3e')
version('0.21', 'fde838d9337fa51744283f46a1db2e74')
version('0.20', '7da066d9cb191a70aa85d0a3d43565d1')
version('0.19', 'ccb3f746c807c5500850987006854a6d')
depends_on('py-setuptools', type='build')
|
wscullin/spack
|
var/spack/repos/builtin/packages/py-markupsafe/package.py
|
Python
|
lgpl-2.1
| 2,130
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import glob
class Guidance(MakefilePackage):
"""Guidance: Accurate detection of unreliable alignment regions accounting
for the uncertainty of multiple parameters."""
homepage = "http://guidance.tau.ac.il/ver2/"
url = "http://guidance.tau.ac.il/ver2/guidance.v2.02.tar.gz"
version('2.02', sha256='825e105dde526759fb5bda1cd539b24db0b90b8b586f26b1df74d9c5abaa7844')
depends_on('perl', type=('build', 'run'))
depends_on('perl-bioperl', type=('build', 'run'))
depends_on('ruby')
depends_on('prank')
depends_on('clustalw')
depends_on('mafft')
depends_on('muscle')
conflicts('%gcc@6.2.0:')
def edit(self, spec, prefix):
for dir in 'Guidance', 'Selecton', 'bioSequence_scripts_and_constants':
with working_dir(join_path('www', dir)):
files = glob.iglob('*.pl')
for file in files:
perl = FileFilter(file)
perl.filter('#!/usr/bin/perl -w', '#!/usr/bin/env perl')
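                    # Descriptive note (added): the filter above rewrites the
                    # '#!/usr/bin/perl -w' shebang line to
                    # '#!/usr/bin/env perl', so each www/*/*.pl script picks up
                    # whichever perl is first on PATH (e.g. the Spack-provided
                    # one) rather than a hard-coded system interpreter.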
    def install(self, spec, prefix):
mkdir(prefix.bin)
install_tree('libs', prefix.bin.libs)
install_tree('programs', prefix.bin.programs)
install_tree('www', prefix.bin.www)
with working_dir(join_path('www', 'Guidance')): # copy without suffix
install('guidance.pl', join_path(prefix.bin.www.Guidance,
'guidance'))
def setup_run_environment(self, env):
        env.prepend_path('PATH', self.prefix.bin.www.Guidance)
|
iulian787/spack
|
var/spack/repos/builtin/packages/guidance/package.py
|
Python
|
lgpl-2.1
| 1,751
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
import os
from smart_open import smart_open
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
from gensim.models.doc2vec import Doc2Vec
from gensim.models.word2vec import Word2Vec
try:
from annoy import AnnoyIndex
except ImportError:
raise ImportError("Annoy has not been installed, if you wish to use the annoy indexer, please run `pip install annoy`")
class AnnoyIndexer(object):
def __init__(self, model=None, num_trees=None):
self.index = None
self.labels = None
self.model = model
self.num_trees = num_trees
if model and num_trees:
if isinstance(self.model, Doc2Vec):
self.build_from_doc2vec()
elif isinstance(self.model, Word2Vec):
self.build_from_word2vec()
else:
raise ValueError("Only a Word2Vec or Doc2Vec instance can be used")
def save(self, fname, protocol=2):
fname_dict = fname + '.d'
self.index.save(fname)
d = {'f': self.model.vector_size, 'num_trees': self.num_trees, 'labels': self.labels}
with smart_open(fname_dict, 'wb') as fout:
_pickle.dump(d, fout, protocol=protocol)
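        # Descriptive note (added): save() writes two files -- the raw Annoy
        # index at `fname` plus a pickled dict at `fname + '.d'` holding the
        # vector size, tree count and labels; load() below expects both.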
def load(self, fname):
fname_dict = fname+'.d'
if not (os.path.exists(fname) and os.path.exists(fname_dict)):
raise IOError(
"Can't find index files '%s' and '%s' - Unable to restore AnnoyIndexer state." % (fname, fname_dict))
else:
with smart_open(fname_dict) as f:
d = _pickle.loads(f.read())
self.num_trees = d['num_trees']
self.index = AnnoyIndex(d['f'])
self.index.load(fname)
self.labels = d['labels']
def build_from_word2vec(self):
"""Build an Annoy index using word vectors from a Word2Vec model"""
self.model.init_sims()
        return self._build_from_model(self.model.wv.syn0norm, self.model.index2word,
                                      self.model.vector_size)
def build_from_doc2vec(self):
"""Build an Annoy index using document vectors from a Doc2Vec model"""
docvecs = self.model.docvecs
docvecs.init_sims()
labels = [docvecs.index_to_doctag(i) for i in range(0, docvecs.count)]
return self._build_from_model(docvecs.doctag_syn0norm, labels, self.model.vector_size)
def _build_from_model(self, vectors, labels, num_features):
index = AnnoyIndex(num_features)
for vector_num, vector in enumerate(vectors):
index.add_item(vector_num, vector)
index.build(self.num_trees)
self.index = index
self.labels = labels
def most_similar(self, vector, num_neighbors):
"""Find the top-N most similar items"""
ids, distances = self.index.get_nns_by_vector(
vector, num_neighbors, include_distances=True)
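        # Note (added): Annoy's angular metric reports distances in [0, 2], so
        # 1 - distance / 2 rescales each result to a similarity-like score in
        # [0, 1], with 1.0 meaning an identical (normalized) vector.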
return [(self.labels[ids[i]], 1 - distances[i] / 2) for i in range(len(ids))]
|
olavurmortensen/gensim
|
gensim/similarities/index.py
|
Python
|
lgpl-2.1
| 3,188
|
# -*- Mode: Python; -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
HAS_MODULES = False
try:
from icalendar import vDatetime
HAS_MODULES = True
except ImportError:
pass
import os
import tempfile
import time
from datetime import datetime, timedelta
from flumotion.common import testsuite
from twisted.trial import unittest
from twisted.internet import defer
from flumotion.common import keycards
from flumotion.common.planet import moods
from flumotion.component.bouncers import icalbouncer
from flumotion.component.bouncers.algorithms import icalbouncer as \
icalbounceralgorithm
from flumotion.component.base import scheduler
from flumotion.common import eventcalendar
from flumotion.common import tz
def _get_config(path=None):
props = {'name': 'testbouncer',
'plugs': {},
'properties': {}}
if path:
props['properties']['file'] = path
return props
def get_iCalScheduler(bouncer):
return bouncer.get_main_algorithm().iCalScheduler
class RequiredModulesMixin(object):
if not HAS_MODULES:
skip = 'This test requires the icalendar and dateutil modules'
class TestIcalBouncerSetup(testsuite.TestCase, RequiredModulesMixin):
def setUp(self):
self.bouncer = None
self.path = os.path.join(os.path.split(__file__)[0],
'test-google.ics')
def tearDown(self):
if self.bouncer:
self.bouncer.stop()
def testNoFileProperty(self):
conf = _get_config()
self.bouncer = icalbouncer.IcalBouncer(conf)
self.assertEquals(self.bouncer.getMood(), moods.sad.value)
def testNonexistentIcalFile(self):
conf = _get_config('/you/dont/have/that/file')
self.bouncer = icalbouncer.IcalBouncer(conf)
self.assertEquals(self.bouncer.getMood(), moods.sad.value)
def testMalformedIcalFile(self):
conf = _get_config(__file__)
self.bouncer = icalbouncer.IcalBouncer(conf)
self.assertEquals(self.bouncer.getMood(), moods.sad.value)
def testSuccessfulSetup(self):
conf = _get_config(self.path)
self.bouncer = icalbouncer.IcalBouncer(conf)
self.assertEquals(self.bouncer.getMood(), moods.happy.value)
class TestIcalBouncerRunning(testsuite.TestCase, RequiredModulesMixin):
def setUp(self):
self.bouncer = None
self.now = datetime.now(tz.UTC)
self.a_day_ago = self.now - timedelta(days=1)
self.half_an_hour_ago = self.now - timedelta(minutes=30)
self.in_half_an_hour = self.now + timedelta(minutes=30)
self.ical_template = """
BEGIN:VCALENDAR
PRODID:-//Flumotion Fake Calendar Creator//flumotion.com//
VERSION:2.0
BEGIN:VTIMEZONE
TZID:Asia/Shanghai
BEGIN:STANDARD
TZOFFSETFROM:+0800
TZOFFSETTO:+0800
TZNAME:CET
DTSTART:19701025T030000
RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU
END:STANDARD
END:VTIMEZONE
BEGIN:VTIMEZONE
TZID:America/Guatemala
BEGIN:STANDARD
TZOFFSETFROM:-0600
TZOFFSETTO:-0600
TZNAME:CET
DTSTART:19701025T030000
RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
DTSTART%(dtstart-tzid)s:%(dtstart)s
DTEND%(dtend-tzid)s:%(dtend)s
SUMMARY:Test calendar
UID:uid
END:VEVENT
END:VCALENDAR
"""
def tearDown(self):
if self.bouncer:
self.bouncer.stop()
def bouncer_from_ical(self, data):
tmp = tempfile.NamedTemporaryFile()
tmp.write(data)
tmp.flush()
conf = _get_config(tmp.name)
return icalbouncer.IcalBouncer(conf)
def ical_from_specs(self, dtstart_tzid, dtstart, dtend_tzid, dtend):
return self.ical_template % {'dtstart-tzid': dtstart_tzid,
'dtstart': vDatetime(dtstart).ical(),
'dtend-tzid': dtend_tzid,
'dtend': vDatetime(dtend).ical()}
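        # Illustrative substitution (hypothetical values): with
        # dtstart_tzid=';TZID=Asia/Shanghai' and a naive dtstart of
        # 2015-10-25 03:00, the template renders a line like
        # 'DTSTART;TZID=Asia/Shanghai:20151025T030000'; an empty tzid
        # argument yields a plain 'DTSTART:...' line instead.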
def _approved_callback(self, keycard):
self.failUnless(keycard)
self.assertEquals(keycard.state, keycards.AUTHENTICATED)
def _denied_callback(self, keycard):
self.failIf(keycard)
class TestIcalBouncerUTC(TestIcalBouncerRunning, RequiredModulesMixin):
def testDeniedUTC(self):
data = self.ical_from_specs('', self.a_day_ago,
'', self.half_an_hour_ago)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
d = defer.maybeDeferred(self.bouncer.authenticate, keycard)
d.addCallback(self._denied_callback)
return d
def testApprovedUTC(self):
data = self.ical_from_specs('', self.a_day_ago,
'', self.in_half_an_hour)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
d = defer.maybeDeferred(self.bouncer.authenticate, keycard)
d.addCallback(self._approved_callback)
return d
class TestIcalBouncerTZID(TestIcalBouncerRunning, RequiredModulesMixin):
def setUp(self):
TestIcalBouncerRunning.setUp(self)
# Beijing is UTC+8
self.beijing_tz = tz.gettz('Asia/Shanghai')
if self.beijing_tz is None:
raise unittest.SkipTest("Could not find tzinfo data "
"for the Asia/Shanghai timezone")
        # Guatemala is UTC-6
self.guatemala_tz = tz.gettz('America/Guatemala')
if self.guatemala_tz is None:
raise unittest.SkipTest("Could not find tzinfo data "
"for the America/Guatemala timezone")
def testIncorrectTimeTZID(self):
naive_new_end = self.in_half_an_hour.replace(tzinfo=None)
# This will fail the assertion that an event can't start after
# it ended (if our timezone handling is correct)
data = self.ical_from_specs('', self.half_an_hour_ago,
';TZID=Asia/Shanghai', naive_new_end)
self.bouncer = self.bouncer_from_ical(data)
self.assertEquals(self.bouncer.getMood(), moods.sad.value)
def testDeniedTZID(self):
new_end = self.half_an_hour_ago.astimezone(self.beijing_tz)
naive_new_end = new_end.replace(tzinfo=None)
data = self.ical_from_specs('', self.a_day_ago,
';TZID=Asia/Shanghai', naive_new_end)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
d = defer.maybeDeferred(self.bouncer.authenticate, keycard)
d.addCallback(self._denied_callback)
return d
def testDeniedIfNotDefinedTZID(self):
new_end = self.half_an_hour_ago.astimezone(self.beijing_tz)
naive_new_end = new_end.replace(tzinfo=None)
data = self.ical_from_specs('', self.a_day_ago,
';TZID=/some/obscure/path/Asia/Shanghai',
naive_new_end)
try:
self.bouncer = self.bouncer_from_ical(data)
except NotCompilantError:
pass
else:
self.assert_(True)
def testApprovedBothTZID(self):
new_start = self.half_an_hour_ago.astimezone(self.beijing_tz)
naive_new_start = new_start.replace(tzinfo=None)
new_end = self.in_half_an_hour.astimezone(self.guatemala_tz)
naive_new_end = new_end.replace(tzinfo=None)
data = self.ical_from_specs(';TZID=Asia/Shanghai', naive_new_start,
';TZID=America/Guatemala', naive_new_end)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
d = defer.maybeDeferred(self.bouncer.authenticate, keycard)
d.addCallback(self._approved_callback)
return d
def testApprovedKeycardDurationCalculationTZID(self):
in_one_minute = datetime.now(self.beijing_tz) + timedelta(minutes=1)
naive_in_one_minute = in_one_minute.replace(tzinfo=None)
data = self.ical_from_specs('', self.a_day_ago,
';TZID=Asia/Shanghai', naive_in_one_minute)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
d = defer.maybeDeferred(self.bouncer.authenticate, keycard)
def approved_and_calculate(result):
self.failUnless(result)
self.assertEquals(result.state, keycards.AUTHENTICATED)
self.failUnless(result.duration)
self.failIf(result.duration > 60)
d.addCallback(approved_and_calculate)
return d
class TestIcalBouncerFloating(TestIcalBouncerRunning, RequiredModulesMixin):
def testApprovedBothFloating(self):
new_start = self.half_an_hour_ago.astimezone(tz.LOCAL)
new_end = self.in_half_an_hour.astimezone(tz.LOCAL)
new_start_naive = new_start.replace(tzinfo=None)
new_end_naive = new_end.replace(tzinfo=None)
data = self.ical_from_specs('', new_start_naive,
'', new_end_naive)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
d = defer.maybeDeferred(self.bouncer.authenticate, keycard)
d.addCallback(self._approved_callback)
return d
def testDeniedUTCAndFloating(self):
new_start = self.a_day_ago.astimezone(tz.LOCAL)
new_start_naive = new_start.replace(tzinfo=None)
data = self.ical_from_specs('', new_start_naive,
'', self.half_an_hour_ago)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
d = defer.maybeDeferred(self.bouncer.authenticate, keycard)
d.addCallback(self._denied_callback)
return d
def testApprovedTZFromEnvironmentWithFloating(self):
def _restoreTZEnv(result, oldTZ):
if oldTZ is None:
del os.environ['TZ']
else:
os.environ['TZ'] = oldTZ
time.tzset()
eventcalendar.tz.LOCAL = tz.LocalTimezone()
return result
new_end_naive = self.half_an_hour_ago.replace(tzinfo=None)
oldTZ = os.environ.get('TZ', None)
os.environ['TZ'] = 'US/Pacific'
time.tzset()
eventcalendar.tz.LOCAL = tz.LocalTimezone()
data = self.ical_from_specs('', self.half_an_hour_ago,
'', new_end_naive)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
d = defer.maybeDeferred(self.bouncer.authenticate, keycard)
d.addCallback(self._approved_callback)
d.addBoth(_restoreTZEnv, oldTZ)
return d
class TestIcalBouncerOverlap(testsuite.TestCase, RequiredModulesMixin):
def setUp(self):
self.bouncer = None
self.now = datetime.now(tz.UTC)
td = icalbounceralgorithm.IcalBouncerAlgorithm.maxKeyCardDuration
self.maxKeyCardDuration = max(td.days * 24 * 60 * 60 + td.seconds + \
td.microseconds / 1e6, 0)
self.ical_template = """
BEGIN:VCALENDAR
PRODID:-//Flumotion Fake Calendar Creator//flumotion.com//
VERSION:2.0
BEGIN:VEVENT
DTSTART:%(dtstart1)s
DTEND:%(dtend1)s
SUMMARY:Test calendar
UID:uid1
END:VEVENT
BEGIN:VEVENT
DTSTART:%(dtstart2)s
DTEND:%(dtend2)s
SUMMARY:Test calendar
UID:uid2
END:VEVENT
BEGIN:VEVENT
DTSTART:%(dtstart3)s
DTEND:%(dtend3)s
SUMMARY:Test calendar
UID:uid3
END:VEVENT
END:VCALENDAR
"""
def tearDown(self):
if self.bouncer:
self.bouncer.stop()
def ical_from_specs(self, dates):
return self.ical_template % {'dtstart1': vDatetime(dates[0]).ical(),
'dtend1': vDatetime(dates[1]).ical(),
'dtstart2': vDatetime(dates[2]).ical(),
'dtend2': vDatetime(dates[3]).ical(),
'dtstart3': vDatetime(dates[4]).ical(),
'dtend3': vDatetime(dates[5]).ical(),
}
def _denied_callback(self, keycard):
self.failIf(keycard)
def _approved_and_calculate(self, result, target):
self.failUnless(result)
self.assertEquals(result.state,
keycards.AUTHENTICATED)
self.failUnless(result.duration)
self.failUnless(target - 30 < result.duration < target + 30)
def bouncer_from_ical(self, data):
tmp = tempfile.NamedTemporaryFile()
tmp.write(data)
tmp.flush()
conf = _get_config(tmp.name)
return icalbouncer.IcalBouncer(conf)
def _timedeltaToSeconds(self, td):
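        # Added note: this is equivalent to max(td.total_seconds(), 0) on
        # Python 2.7+, presumably spelled out here for older interpreters.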
return max(td.days * 24 * 60 * 60 + td.seconds + \
td.microseconds / 1e6, 0)
def testOverlapLessThanWindowSize(self):
dates = [self.now - timedelta(minutes=1),
self.now + timedelta(seconds=0.4*self.maxKeyCardDuration),
self.now + timedelta(seconds=0.2*self.maxKeyCardDuration),
self.now + timedelta(seconds=0.6*self.maxKeyCardDuration),
self.now + timedelta(seconds=0.4*self.maxKeyCardDuration),
self.now + timedelta(seconds=0.8*self.maxKeyCardDuration),
]
data = self.ical_from_specs(dates)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
d = defer.maybeDeferred(self.bouncer.authenticate, keycard)
d.addCallback(self._approved_and_calculate,\
0.8*self.maxKeyCardDuration)
return d
def testOverlapMoreThanWindowSize(self):
dates = [self.now - timedelta(minutes=1),
self.now + timedelta(seconds=0.6*self.maxKeyCardDuration),
self.now + timedelta(seconds=0.3*self.maxKeyCardDuration),
self.now + timedelta(seconds=0.9*self.maxKeyCardDuration),
self.now + timedelta(seconds=0.6*self.maxKeyCardDuration),
self.now + timedelta(seconds=1.2*self.maxKeyCardDuration),
]
data = self.ical_from_specs(dates)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
d = defer.maybeDeferred(self.bouncer.authenticate, keycard)
d.addCallback(self._approved_and_calculate, self.maxKeyCardDuration)
return d
def testOverlapEndingSimulteanously(self):
dates = [self.now - timedelta(minutes=1),
self.now + timedelta(seconds=0.6*self.maxKeyCardDuration),
self.now - timedelta(minutes=2),
self.now + timedelta(seconds=0.6*self.maxKeyCardDuration),
self.now - timedelta(seconds=10),
self.now - timedelta(seconds=1),
]
data = self.ical_from_specs(dates)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
d = defer.maybeDeferred(self.bouncer.authenticate, keycard)
d.addCallback(self._approved_and_calculate,\
0.6*self.maxKeyCardDuration)
return d
class TestIcalBouncerCalSwitch(TestIcalBouncerRunning, RequiredModulesMixin):
def _getCalendarFromString(self, data):
tmp = tempfile.NamedTemporaryFile()
tmp.write(data)
tmp.flush()
tmp.seek(0)
return eventcalendar.fromFile(tmp)
def testDonTRevoke(self):
data = self.ical_from_specs('', self.now,
'', self.in_half_an_hour)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
self.bouncer.authenticate(keycard)
data = self.ical_from_specs('', self.a_day_ago,
'', self.in_half_an_hour)
calendar = self._getCalendarFromString(data)
self.bouncer.get_main_algorithm().iCalScheduler.setCalendar(calendar)
self.failUnless(self.bouncer.hasKeycard(keycard))
def testRevoke(self):
data = self.ical_from_specs('', self.now,
'', self.in_half_an_hour)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
self.bouncer.authenticate(keycard)
data = self.ical_from_specs('', self.a_day_ago,
'', self.half_an_hour_ago)
calendar = self._getCalendarFromString(data)
self.bouncer.get_main_algorithm().iCalScheduler.setCalendar(calendar)
self.failIf(self.bouncer.hasKeycard(keycard))
|
timvideos/flumotion
|
flumotion/test/test_icalbouncer.py
|
Python
|
lgpl-2.1
| 17,170
|
import curses
import functools
from stem.control import EventType, Controller
from stem.util import str_tools
# colors that curses can handle
COLOR_LIST = {
"red": curses.COLOR_RED,
"green": curses.COLOR_GREEN,
"yellow": curses.COLOR_YELLOW,
"blue": curses.COLOR_BLUE,
"cyan": curses.COLOR_CYAN,
"magenta": curses.COLOR_MAGENTA,
"black": curses.COLOR_BLACK,
"white": curses.COLOR_WHITE,
}
GRAPH_WIDTH = 40
GRAPH_HEIGHT = 8
DOWNLOAD_COLOR = "green"
UPLOAD_COLOR = "blue"
def main():
with Controller.from_port(port = 9051) as controller:
controller.authenticate()
try:
# This makes curses initialize and call draw_bandwidth_graph() with a
# reference to the screen, followed by additional arguments (in this
# case just the controller).
curses.wrapper(draw_bandwidth_graph, controller)
except KeyboardInterrupt:
pass # the user hit ctrl+c
def draw_bandwidth_graph(stdscr, controller):
window = Window(stdscr)
# (downloaded, uploaded) tuples for the last 40 seconds
bandwidth_rates = [(0, 0)] * GRAPH_WIDTH
# Making a partial that wraps the window and bandwidth_rates with a function
# for Tor to call when it gets a BW event. This causes the 'window' and
# 'bandwidth_rates' to be provided as the first two arguments whenever
# 'bw_event_handler()' is called.
bw_event_handler = functools.partial(_handle_bandwidth_event, window, bandwidth_rates)
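    # (added clarification) From here on, bw_event_handler(event) behaves
    # exactly like _handle_bandwidth_event(window, bandwidth_rates, event).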
# Registering this listener with Tor. Tor reports a BW event each second.
controller.add_event_listener(bw_event_handler, EventType.BW)
# Pause the main thread until the user hits any key... and no, don't you dare
# ask where the 'any' key is. :P
stdscr.getch()
def _handle_bandwidth_event(window, bandwidth_rates, event):
# callback for when tor provides us with a BW event
bandwidth_rates.insert(0, (event.read, event.written))
bandwidth_rates = bandwidth_rates[:GRAPH_WIDTH] # truncate old values
_render_graph(window, bandwidth_rates)
def _render_graph(window, bandwidth_rates):
window.erase()
download_rates = [entry[0] for entry in bandwidth_rates]
upload_rates = [entry[1] for entry in bandwidth_rates]
# show the latest values at the top
label = "Downloaded (%s/s):" % str_tools.size_label(download_rates[0], 1)
window.addstr(0, 1, label, DOWNLOAD_COLOR, curses.A_BOLD)
label = "Uploaded (%s/s):" % str_tools.size_label(upload_rates[0], 1)
window.addstr(0, GRAPH_WIDTH + 7, label, UPLOAD_COLOR, curses.A_BOLD)
# draw the graph bounds in KB
max_download_rate = max(download_rates)
max_upload_rate = max(upload_rates)
window.addstr(1, 1, "%4i" % (max_download_rate / 1024), DOWNLOAD_COLOR)
window.addstr(GRAPH_HEIGHT, 1, " 0", DOWNLOAD_COLOR)
window.addstr(1, GRAPH_WIDTH + 7, "%4i" % (max_upload_rate / 1024), UPLOAD_COLOR)
window.addstr(GRAPH_HEIGHT, GRAPH_WIDTH + 7, " 0", UPLOAD_COLOR)
# draw the graph
for col in range(GRAPH_WIDTH):
        col_height = int(GRAPH_HEIGHT * download_rates[col] / max(max_download_rate, 1))
for row in range(col_height):
window.addstr(GRAPH_HEIGHT - row, col + 6, " ", DOWNLOAD_COLOR, curses.A_STANDOUT)
        col_height = int(GRAPH_HEIGHT * upload_rates[col] / max(max_upload_rate, 1))
for row in range(col_height):
window.addstr(GRAPH_HEIGHT - row, col + GRAPH_WIDTH + 12, " ", UPLOAD_COLOR, curses.A_STANDOUT)
window.refresh()
class Window(object):
"""
Simple wrapper for the curses standard screen object.
"""
def __init__(self, stdscr):
self._stdscr = stdscr
# Mappings of names to the curses color attribute. Initially these all
# reference black text, but if the terminal can handle color then
# they're set with that foreground color.
self._colors = dict([(color, 0) for color in COLOR_LIST])
# allows for background transparency
try:
curses.use_default_colors()
except curses.error:
pass
# makes the cursor invisible
try:
curses.curs_set(0)
except curses.error:
pass
# initializes colors if the terminal can handle them
try:
if curses.has_colors():
color_pair = 1
for name, foreground in COLOR_LIST.items():
background = -1 # allows for default (possibly transparent) background
curses.init_pair(color_pair, foreground, background)
self._colors[name] = curses.color_pair(color_pair)
color_pair += 1
except curses.error:
pass
def addstr(self, y, x, msg, color = None, attr = curses.A_NORMAL):
# Curses throws an error if we try to draw a message that spans out of the
# window's bounds (... seriously?), so doing our best to avoid that.
if color is not None:
if color not in self._colors:
recognized_colors = ", ".join(self._colors.keys())
raise ValueError("The '%s' color isn't recognized: %s" % (color, recognized_colors))
attr |= self._colors[color]
max_y, max_x = self._stdscr.getmaxyx()
if max_x > x and max_y > y:
try:
self._stdscr.addstr(y, x, msg[:max_x - x], attr)
except:
pass # maybe an edge case while resizing the window
def erase(self):
self._stdscr.erase()
def refresh(self):
self._stdscr.refresh()
if __name__ == '__main__':
main()
|
tparks5/tor-stem
|
docs/_static/example/event_listening.py
|
Python
|
lgpl-3.0
| 5,286
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class TumblrIE(InfoExtractor):
_VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/(?:post|video)/(?P<id>[0-9]+)(?:$|[/?#])'
_TESTS = [{
'url': 'http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes',
'md5': '479bb068e5b16462f5176a6828829767',
'info_dict': {
'id': '54196191430',
'ext': 'mp4',
'title': 'tatiana maslany news, Orphan Black || DVD extra - behind the scenes ↳...',
'description': 'md5:37db8211e40b50c7c44e95da14f630b7',
'thumbnail': 're:http://.*\.jpg',
}
}, {
'url': 'http://5sostrum.tumblr.com/post/90208453769/yall-forgetting-the-greatest-keek-of-them-all',
'md5': 'bf348ef8c0ef84fbf1cbd6fa6e000359',
'info_dict': {
'id': '90208453769',
'ext': 'mp4',
'title': '5SOS STRUM ;]',
'description': 'md5:dba62ac8639482759c8eb10ce474586a',
'thumbnail': 're:http://.*\.jpg',
}
}]
def _real_extract(self, url):
m_url = re.match(self._VALID_URL, url)
video_id = m_url.group('id')
blog = m_url.group('blog_name')
url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
webpage = self._download_webpage(url, video_id)
iframe_url = self._search_regex(
r'src=\'(https?://www\.tumblr\.com/video/[^\']+)\'',
webpage, 'iframe url')
iframe = self._download_webpage(iframe_url, video_id)
video_url = self._search_regex(r'<source src="([^"]+)"',
iframe, 'video url')
        # The only place where you can get a title; it's not complete,
        # but searching in other places doesn't work for all videos
video_title = self._html_search_regex(
r'(?s)<title>(?P<title>.*?)(?: \| Tumblr)?</title>',
webpage, 'title')
return {
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': video_title,
'description': self._og_search_description(webpage, default=None),
'thumbnail': self._og_search_thumbnail(webpage, default=None),
}
|
marxin/youtube-dl
|
youtube_dl/extractor/tumblr.py
|
Python
|
unlicense
| 2,338
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
from trove.common.strategies.strategy import Strategy
class Storage(Strategy):
"""Base class for Storage Strategy implementation."""
__strategy_type__ = 'storage'
__strategy_ns__ = 'trove.common.strategies.storage'
def __init__(self, context):
self.context = context
super(Storage, self).__init__()
@abc.abstractmethod
def save(self, filename, stream, metadata=None):
"""Persist information from the stream."""
@abc.abstractmethod
def load(self, location, backup_checksum):
"""Load a stream from a persisted storage location."""
@abc.abstractmethod
def load_metadata(self, location, backup_checksum):
"""Load metadata for a persisted object."""
@abc.abstractmethod
def save_metadata(self, location, metadata={}):
"""Save metadata for a persisted object."""
|
mmasaki/trove
|
trove/common/strategies/storage/base.py
|
Python
|
apache-2.0
| 1,530
|
# Copyright (c) 2016 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
consoleauth_topic_opt = cfg.StrOpt('consoleauth_topic',
default='consoleauth',
help='The topic console auth proxy nodes listen on')
console_token_ttl = cfg.IntOpt('console_token_ttl',
default=600,
help='How many seconds before deleting tokens')
CONSOLEAUTH_OPTS = [consoleauth_topic_opt, console_token_ttl]
def register_opts(conf):
conf.register_opts(CONSOLEAUTH_OPTS)
def list_opts():
return {'DEFAULT': CONSOLEAUTH_OPTS}
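# Illustrative usage (assumed, not from this module): once registered, the
# options live in oslo.config's DEFAULT group, e.g.
#
#     from oslo_config import cfg
#     register_opts(cfg.CONF)
#     cfg.CONF.console_token_ttl   # 600 unless overridden in nova.conf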
|
bigswitch/nova
|
nova/conf/consoleauth.py
|
Python
|
apache-2.0
| 1,156
|
"""Support for Soma sensors."""
from datetime import timedelta
import logging
from requests import RequestException
from homeassistant.const import DEVICE_CLASS_BATTERY, PERCENTAGE
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from . import DEVICES, SomaEntity
from .const import API, DOMAIN
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=30)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Soma sensor platform."""
devices = hass.data[DOMAIN][DEVICES]
async_add_entities(
[SomaSensor(sensor, hass.data[DOMAIN][API]) for sensor in devices], True
)
class SomaSensor(SomaEntity, Entity):
"""Representation of a Soma cover device."""
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return DEVICE_CLASS_BATTERY
@property
def name(self):
"""Return the name of the device."""
return self.device["name"] + " battery level"
@property
def state(self):
"""Return the state of the entity."""
return self.battery_state
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return PERCENTAGE
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Update the sensor with the latest data."""
try:
_LOGGER.debug("Soma Sensor Update")
response = await self.hass.async_add_executor_job(
self.api.get_battery_level, self.device["mac"]
)
except RequestException:
_LOGGER.error("Connection to SOMA Connect failed")
self.is_available = False
return
if response["result"] != "success":
_LOGGER.error(
"Unable to reach device %s (%s)", self.device["name"], response["msg"]
)
self.is_available = False
return
# https://support.somasmarthome.com/hc/en-us/articles/360026064234-HTTP-API
        # The battery_level in the response is expected to range from 360 (0%)
        # to 410 (100%). Readings above 410 are treated as 100% and readings
        # below 360 as 0%, since the device considers 360 the minimum needed to
        # move the motor.
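        # For example, a reading of 385 maps to round(2 * (385 - 360)) = 50 (%).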
_battery = round(2 * (response["battery_level"] - 360))
battery = max(min(100, _battery), 0)
self.battery_state = battery
self.is_available = True
|
partofthething/home-assistant
|
homeassistant/components/soma/sensor.py
|
Python
|
apache-2.0
| 2,532
|
#!/usr/bin/env python
import glob
import copy
import cv2
import cv_bridge
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import Int32, Float32, String
import rospkg
class Hear_orders:
def __init__(self):
self.speech_subscriber = rospy.Subscriber("/speech_recognition", String, self.publish_emotion)
self.emotion_publisher = rospy.Publisher("/emotion", String, queue_size=10)
# self.timer = rospy.Timer(rospy.Duration(self.velocity), self.timer_cb)
def publish_emotion(self, data):
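        # The recognized text itself is ignored; any speech event publishes the
        # same "heard_an_order" emotion marker.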
self.emotion_publisher.publish("heard_an_order")
def main():
rospy.init_node('hearing_node', anonymous=True)
rate = rospy.Rate(30)
rospack = rospkg.RosPack()
# path = rospack.get_path('baxter_face_animation') + "/data/"
Hear_orders()
while not rospy.is_shutdown():
rate.sleep()
if __name__ == "__main__":
main()
|
UCRoboticsLab/BaxterTictactoe
|
src/baxter_face_animation/src/baxter_face_animation/hear_words.py
|
Python
|
apache-2.0
| 926
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nstimer(base_resource) :
""" Configuration for Timer resource. """
def __init__(self) :
self._name = ""
self._interval = 0
self._unit = ""
self._comment = ""
self._newname = ""
self.___count = 0
@property
def name(self) :
ur"""Timer name.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Timer name.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def interval(self) :
ur"""The frequency at which the policies bound to this timer are invoked. The minimum value is 20 msec. The maximum value is 20940 in seconds and 349 in minutes.<br/>Default value: 5<br/>Minimum length = 1<br/>Maximum length = 20940000.
"""
try :
return self._interval
except Exception as e:
raise e
@interval.setter
def interval(self, interval) :
ur"""The frequency at which the policies bound to this timer are invoked. The minimum value is 20 msec. The maximum value is 20940 in seconds and 349 in minutes.<br/>Default value: 5<br/>Minimum length = 1<br/>Maximum length = 20940000
"""
try :
self._interval = interval
except Exception as e:
raise e
@property
def unit(self) :
ur"""Timer interval unit.<br/>Default value: SEC<br/>Possible values = SEC, MIN.
"""
try :
return self._unit
except Exception as e:
raise e
@unit.setter
def unit(self, unit) :
ur"""Timer interval unit.<br/>Default value: SEC<br/>Possible values = SEC, MIN
"""
try :
self._unit = unit
except Exception as e:
raise e
@property
def comment(self) :
ur"""Comments associated with this timer.
"""
try :
return self._comment
except Exception as e:
raise e
@comment.setter
def comment(self, comment) :
ur"""Comments associated with this timer.
"""
try :
self._comment = comment
except Exception as e:
raise e
@property
def newname(self) :
ur"""The new name of the timer.<br/>Minimum length = 1.
"""
try :
return self._newname
except Exception as e:
raise e
@newname.setter
def newname(self, newname) :
ur"""The new name of the timer.<br/>Minimum length = 1
"""
try :
self._newname = newname
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(nstimer_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nstimer
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add nstimer.
"""
try :
if type(resource) is not list :
addresource = nstimer()
addresource.name = resource.name
addresource.interval = resource.interval
addresource.unit = resource.unit
addresource.comment = resource.comment
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ nstimer() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].interval = resource[i].interval
addresources[i].unit = resource[i].unit
addresources[i].comment = resource[i].comment
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete nstimer.
"""
try :
if type(resource) is not list :
deleteresource = nstimer()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ nstimer() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ nstimer() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update nstimer.
"""
try :
if type(resource) is not list :
updateresource = nstimer()
updateresource.name = resource.name
updateresource.interval = resource.interval
updateresource.unit = resource.unit
updateresource.comment = resource.comment
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ nstimer() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].interval = resource[i].interval
updateresources[i].unit = resource[i].unit
updateresources[i].comment = resource[i].comment
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of nstimer resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = nstimer()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
unsetresource.interval = resource.interval
unsetresource.unit = resource.unit
unsetresource.comment = resource.comment
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ nstimer() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ nstimer() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
unsetresources[i].interval = resource[i].interval
unsetresources[i].unit = resource[i].unit
unsetresources[i].comment = resource[i].comment
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def rename(cls, client, resource, new_name) :
ur""" Use this API to rename a nstimer resource.
"""
try :
renameresource = nstimer()
if type(resource) == cls :
renameresource.name = resource.name
else :
renameresource.name = resource
return renameresource.rename_resource(client,new_name)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the nstimer resources that are configured on netscaler.
"""
try :
if not name :
obj = nstimer()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = nstimer()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [nstimer() for _ in range(len(name))]
obj = [nstimer() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = nstimer()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of nstimer resources.
	filter string should be in JSON format. eg: "port:80,servicetype:HTTP".
"""
try :
obj = nstimer()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the nstimer resources configured on NetScaler.
"""
try :
obj = nstimer()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of nstimer resources.
	Filter string should be in JSON format. eg: "port:80,servicetype:HTTP".
"""
try :
obj = nstimer()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Unit:
SEC = "SEC"
MIN = "MIN"
class nstimer_response(base_response) :
def __init__(self, length=1) :
self.nstimer = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.nstimer = [nstimer() for _ in range(length)]
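

# --- Illustrative usage sketch (editor's addition, not part of the original
# SDK file): creating a timer and fetching it back by name. `client` is assumed
# to be an already-authenticated nitro service session; the timer name and
# values below are hypothetical.
def _example_add_and_fetch(client):
    timer = nstimer()
    timer.name = "sample_timer"
    timer.interval = 30
    timer.unit = Unit.SEC  # equivalent to the literal "SEC"
    timer.comment = "created from the usage sketch"
    nstimer.add(client, timer)                   # add the new timer
    return nstimer.get(client, "sample_timer")   # fetch the same timer by name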
|
benfinke/ns_python
|
nssrc/com/citrix/netscaler/nitro/resource/config/ns/nstimer.py
|
Python
|
apache-2.0
| 10,753
|
def main():
info('Evacuate Microbone')
close(description='Jan Inlet')
open(description='Jan Ion Pump')
#close(description='Minibone to Bone')
open(description='Minibone to Bone')
#close(description='Microbone to Minibone')
open(description='Microbone to Minibone')
open('C')
close('P')
close(description='Microbone to CO2 Laser')
open(description='Microbone to Turbo')
open(description='Microbone to Inlet Pipette')
open(description='Microbone to Getter NP-10C')
#evacuate apis section
#info('evacuate apis')
open(description='Microbone to Getter NP-10H')
#sleep(15)
#close(description='Microbone to Getter NP-10H')
|
USGSDenverPychron/pychron
|
docs/user_guide/operation/scripts/examples/argus/extraction/apis/PrepareForApis.py
|
Python
|
apache-2.0
| 702
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceQueueingStateSaver and wrappers.
Please see the reading data how-to for context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
# pylint: disable=protected-access
_restore_sparse = sparse_ops._take_many_sparse_from_tensors_map
_store_sparse = sparse_ops._add_many_sparse_to_tensors_map
# pylint: enable=protected-access
class _SequenceInputWrapper(object):
"""A wrapper object for storing sequence-related input.
  The _SequenceInputWrapper accepts four objects:
length: A scalar int containing the length of the input sequence.
key: A scalar string containing the unique key of the input sequence.
sequences: A dict mapping labels, like `input`, to tensors
whose initial index dimension is at least size `length`.
context: A dict mapping labels, like `global_target`, to tensors
that represent data across the entire example.
"""
def __init__(self, length, key, sequences, context):
length = ops.convert_to_tensor(length, name="length")
key = ops.convert_to_tensor(key, name="key")
if not isinstance(sequences, dict):
raise TypeError("sequences must be a dict")
if not isinstance(context, dict):
raise TypeError("context must be a dict")
if not sequences:
raise ValueError("must have at least one sequence tensor")
for k in sequences.keys():
if not isinstance(k, six.string_types):
raise TypeError("sequence key must be string: %s" % k)
if ":" in k:
raise ValueError("sequence key may not have a colon: '%s'" % k)
for k in context.keys():
if not isinstance(k, six.string_types):
raise TypeError("context key must be string: %s" % k)
if ":" in k:
raise ValueError("context key may not have a colon: '%s'" % k)
sequences = dict((k, ops.convert_to_tensor(
v, name="sequence_%s" % k)) for k, v in sequences.items())
context = dict((k, ops.convert_to_tensor(
v, name="context_%s" % k)) for k, v in context.items())
self._length = length
self._key = key
self._sequences = sequences
self._context = context
@property
def length(self):
return self._length
@property
def key(self):
return self._key
@property
def sequences(self):
return self._sequences
@property
def context(self):
return self._context
def _check_multiple_of(value, multiple_of):
"""Checks that value `value` is a non-zero multiple of `multiple_of`.
Args:
value: an int32 scalar Tensor.
multiple_of: an int or int32 scalar Tensor.
Returns:
new_value: an int32 scalar Tensor matching `value`, but which includes an
assertion that `value` is a multiple of `multiple_of`.
"""
assert isinstance(value, ops.Tensor)
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_and(
math_ops.equal(math_ops.mod(value, multiple_of), 0),
math_ops.not_equal(value, 0)), [
string_ops.string_join([
"Tensor %s should be a multiple of: " % value.name,
string_ops.as_string(multiple_of), ", but saw value: ",
string_ops.as_string(value),
". Consider setting pad=True."
])
])
]):
new_value = array_ops.identity(value, name="multiple_of_checked")
return new_value
def _check_rank(value, expected_rank):
"""Check the rank of Tensor `value`, via shape inference and assertions.
Args:
value: A Tensor, possibly with shape associated shape information.
expected_rank: int32 scalar (optionally a `Tensor`).
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
assertions on its rank. If expected_rank is not a `Tensor`, then
new_value's shape's rank has been set.
Raises:
ValueError: if `expected_rank` is not a `Tensor` and the rank of `value`
is known and is not equal to `expected_rank`.
"""
assert isinstance(value, ops.Tensor)
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.equal(expected_rank, array_ops.rank(value)), [
string_ops.string_join([
"Rank of tensor %s should be: " % value.name,
string_ops.as_string(expected_rank), ", shape received:"
]), array_ops.shape(value)
])
]):
new_value = array_ops.identity(value, name="rank_checked")
if isinstance(expected_rank, ops.Tensor):
expected_rank_value = tensor_util.constant_value(expected_rank)
if expected_rank_value is not None:
expected_rank = int(expected_rank_value)
if not isinstance(expected_rank, ops.Tensor):
try:
new_value.set_shape(new_value.get_shape().with_rank(expected_rank))
except ValueError as e:
raise ValueError("Rank check failed for %s: %s" % (value.name, str(e)))
return new_value
def _check_shape(value, expected_shape):
"""Check the shape of Tensor `value`, via shape inference and assertions.
Args:
value: A Tensor, possibly with shape associated shape information.
expected_shape: a `TensorShape`, list of `int32`, or a vector `Tensor`.
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
assertions on its shape. If expected_shape is not a `Tensor`, then
new_value's shape has been set.
Raises:
ValueError: if `expected_shape` is not a `Tensor` and the shape of `value`
is known and is not equal to `expected_shape`.
"""
assert isinstance(value, ops.Tensor)
if isinstance(expected_shape, tensor_shape.TensorShape):
expected_shape = expected_shape.as_list()
if isinstance(expected_shape, ops.Tensor):
expected_shape_value = tensor_util.constant_value(expected_shape)
if expected_shape_value is not None:
expected_shape = [int(d) for d in expected_shape_value]
if isinstance(expected_shape, ops.Tensor):
value = _check_rank(value, array_ops.size(expected_shape))
else:
value = _check_rank(value, len(expected_shape))
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.reduce_all(
math_ops.equal(expected_shape, array_ops.shape(value))), [
string_ops.string_join([
"Shape of tensor %s should be: " % value.name,
string_ops.as_string(expected_shape),
", shape received: ",
string_ops.as_string(array_ops.shape(value))
])
])
]):
new_value = array_ops.identity(value, name="shape_checked")
if not isinstance(expected_shape, ops.Tensor):
try:
new_value.set_shape(new_value.get_shape().merge_with(expected_shape))
except ValueError as e:
raise ValueError("Shape check failed for %s: %s" % (value.name, str(e)))
return new_value
def _check_dimensions(value, dimensions, expected_sizes, debug_prefix):
"""Check the dimensions of Tensor `value`, via shape inference and assertions.
Args:
value: A Tensor, with optional / partial shape associated shape information.
dimensions: An int list, the dimensions to check.
expected_sizes: list of mixed ints and int32 scalar tensors.
Optionally also a vector `Tensor`.
debug_prefix: A string, used for naming ops and printing debugging messages.
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
assertions on its shape. If expected_sizes is not a `Tensor`, then
new_value's shape has been set for all `dimensions[i]` where
`expected_sizes[i]` is not a `Tensor`.
Raises:
TypeError: if any of the input contains invalid types:
if `value` is not a `Tensor`.
if `dimensions` is not a `list` or `tuple`.
ValueError: if input has incorrect sizes or inferred shapes do not match:
if `dimensions` contains repeated dimensions.
if `expected_sizes` is not a `Tensor` and its length does not match that
`dimensions`.
if `value`'s shape has a well-defined rank, and one of the values in
`dimensions` is equal to or above this rank.
if `value`'s shape is well defined for some `dimensions[i]`, and
`expected_sizes[i]` is not a `Tensor`, and these two values do
not match.
"""
if not isinstance(dimensions, (list, tuple)):
raise TypeError("dimensions must be a list or tuple")
if len(set(dimensions)) != len(dimensions):
raise ValueError("dimensions are not unique: %s" % dimensions)
if not isinstance(value, ops.Tensor):
raise TypeError("value is not a Tensor: %s" % value)
value_shape = value.get_shape()
if not isinstance(expected_sizes, ops.Tensor):
if len(dimensions) != len(expected_sizes):
raise ValueError("len(dimensions) != len(expected_sizes): %d vs. %d" %
(len(dimensions), len(expected_sizes)))
if value_shape.ndims is not None:
if value_shape.ndims <= max(dimensions):
raise ValueError(
"%s: rank of input is not greater than max(dimensions): "
"%d vs. %d" % (debug_prefix, value.get_shape().ndims,
max(dimensions)))
value_dims = value_shape.as_list()
for d, s in zip(dimensions, expected_sizes):
if not isinstance(s, ops.Tensor):
value_dims[d] = s
try:
value.set_shape(value.get_shape().merge_with(value_dims))
except ValueError as e:
raise ValueError("Dimensions check failed for %s: %s" %
(debug_prefix, str(e)))
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.equal(expected_size, array_ops.shape(value)[dimension]), [
string_ops.string_join([
"Dimension %d of tensor labeled %s should be: " %
(dimension, debug_prefix),
string_ops.as_string(expected_size), ", shape received: ",
string_ops.as_string(array_ops.shape(value))
])
]) for (dimension, expected_size) in zip(dimensions, expected_sizes)
]):
new_value = array_ops.identity(value, name="dims_checked_%s" % debug_prefix)
return new_value
def _prepare_sequence_inputs(inputs, states):
"""Convert input to tensors and validate shape information.
Args:
inputs: A `_SequenceInputWrapper` instance.
states: A dictionary mapping state names to input constants or tensors.
Returns:
The tuple (length, key, sorted_states, sorted_sequences, sorted_context),
where each value has been checked for valid shape, and the sorted_* dicts
are instances of OrderedDict; with key-value pairs sorted by key.
Raises:
ValueError: if the shapes of inputs.context.values(), states.values(),
or inputs.sequences.values() are not fully defined (with the exception
of the dimension of any `Tensor` in inputs.sequences.values()).
TypeError: if the dtype of length is not int32.
"""
# Convert state initial values to tensors
states = dict((k, ops.convert_to_tensor(
v, name="state_%s" % k)) for k, v in states.items())
def _assert_fully_defined(label, dict_, ignore_first_dimension=False):
start_dimension = 1 if ignore_first_dimension else 0
for k, v in dict_.items():
if not v.get_shape()[start_dimension:].is_fully_defined():
raise ValueError("Shape for %s %s is not fully defined %s: %s" %
(label, k, "(ignoring first dimension)" if
ignore_first_dimension else "", v.get_shape()))
_assert_fully_defined("state", states)
_assert_fully_defined("context", inputs.context)
# Sequences' first dimension (time) may be variable
_assert_fully_defined(
"sequence", inputs.sequences, ignore_first_dimension=True)
# Get dictionaries' dtypes ordered by name - ordering is important
# when switching between dicts and tuples for passing to Barrier.
def _sort_by_name(d):
return collections.OrderedDict(sorted(d.items(), key=lambda k_v: k_v[0]))
sorted_sequences = _sort_by_name(inputs.sequences)
sorted_context = _sort_by_name(inputs.context)
sorted_states = _sort_by_name(states)
length = _check_rank(inputs.length, 0)
key = _check_rank(inputs.key, 0)
if length.dtype != dtypes.int32:
raise TypeError("length dtype must be int32, but received: %s" %
length.dtype)
if key.dtype != dtypes.string:
raise TypeError("key dtype must be string, but received: %s" % key.dtype)
return (length, key, sorted_states, sorted_sequences, sorted_context)
# NextQueuedSequenceBatch works closely with
# SequenceQueueingStateSaver and requires access to its private properties
# pylint: disable=protected-access
class NextQueuedSequenceBatch(object):
"""NextQueuedSequenceBatch stores deferred SequenceQueueingStateSaver data.
This class is instantiated by `SequenceQueueingStateSaver` and is accessible
via its `next_batch` property.
"""
def __init__(self, state_saver):
self._state_saver = state_saver
@property
def total_length(self):
"""The lengths of the original (non-truncated) unrolled examples.
Returns:
An integer vector of length `batch_size`, the total lengths.
"""
return self._state_saver._received_total_length
@property
def length(self):
"""The lengths of the given truncated unrolled examples.
For initial iterations, for which `sequence * num_unroll < length`,
this number is `num_unroll`. For the remainder,
this number is between `0` and `num_unroll`.
Returns:
An integer vector of length `batch_size`, the lengths.
"""
return self._state_saver._received_length
@property
def batch_size(self):
"""The batch_size of the given batch.
Usually, this is the batch_size requested when initializing the SQSS, but
if allow_small_batch=True this will become smaller when inputs are
exhausted.
Returns:
A scalar integer tensor, the batch_size
"""
return self._state_saver._received_batch_size
@property
def insertion_index(self):
"""The insertion indices of the examples (when they were first added).
These indices start with the value -2**63 and increase with every
call to the prefetch op. Each whole example gets its own insertion
index, and this is used to prioritize the example so that its truncated
segments appear in adjacent iterations, even if new examples are inserted
by the prefetch op between iterations.
Returns:
An int64 vector of length `batch_size`, the insertion indices.
"""
return self._state_saver._received_indices
@property
def key(self):
"""The key names of the given truncated unrolled examples.
The format of the key is:
```python
"%05d_of_%05d:%s" % (sequence, sequence_count, original_key)
```
where `original_key` is the unique key read in by the prefetcher.
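    For example (illustrative values), the third of five segments of an input
    with key "my_key" is labeled "00002_of_00005:my_key".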
Returns:
A string vector of length `batch_size`, the keys.
"""
return self._state_saver._received_keys
@property
def next_key(self):
"""The key names of the next (in iteration) truncated unrolled examples.
The format of the key is:
```python
"%05d_of_%05d:%s" % (sequence + 1, sequence_count, original_key)
```
if `sequence + 1 < sequence_count`, otherwise:
```python
"STOP:%s" % original_key
```
where `original_key` is the unique key read in by the prefetcher.
Returns:
A string vector of length `batch_size`, the keys.
"""
return self._state_saver._received_next_key
@property
def sequence(self):
"""An int32 vector, length `batch_size`: the sequence index of each entry.
When an input is split up, the sequence values
```
0, 1, ..., sequence_count - 1
```
are assigned to each split.
Returns:
An int32 vector `Tensor`.
"""
return self._state_saver._received_sequence
@property
def sequence_count(self):
"""An int32 vector, length `batch_size`: the sequence count of each entry.
When an input is split up, the number of splits is equal to:
`padded_length / num_unroll`. This is the sequence_count.
Returns:
An int32 vector `Tensor`.
"""
return self._state_saver._received_sequence_count
@property
def context(self):
"""A dict mapping keys of `input_context` to batched context.
Returns:
A dict mapping keys of `input_context` to tensors.
If we had at input:
```python
context["name"].get_shape() == [d1, d2, ...]
```
then for this property:
```python
context["name"].get_shape() == [batch_size, d1, d2, ...]
```
"""
return self._state_saver._received_context
@property
def sequences(self):
"""A dict mapping keys of `input_sequences` to split and rebatched data.
Returns:
A dict mapping keys of `input_sequences` to tensors.
If we had at input:
```python
sequences["name"].get_shape() == [None, d1, d2, ...]
```
where `None` meant the sequence time was dynamic, then for this property:
```python
sequences["name"].get_shape() == [batch_size, num_unroll, d1, d2, ...].
```
"""
return self._state_saver._received_sequences
def state(self, state_name):
"""Returns batched state tensors.
Args:
state_name: string, matches a key provided in `initial_states`.
Returns:
A `Tensor`: a batched set of states, either initial states (if this is
the first run of the given example), or a value as stored during
a previous iteration via `save_state` control flow.
Its type is the same as `initial_states["state_name"].dtype`.
If we had at input:
```python
initial_states[state_name].get_shape() == [d1, d2, ...],
```
then
```python
state(state_name).get_shape() == [batch_size, d1, d2, ...]
```
Raises:
KeyError: if `state_name` does not match any of the initial states
declared in `initial_states`.
"""
return self._state_saver._received_states[state_name]
def save_state(self, state_name, value, name=None):
"""Returns an op to save the current batch of state `state_name`.
Args:
state_name: string, matches a key provided in `initial_states`.
value: A `Tensor`.
Its type must match that of `initial_states[state_name].dtype`.
If we had at input:
```python
initial_states[state_name].get_shape() == [d1, d2, ...]
```
then the shape of `value` must match:
```python
tf.shape(value) == [batch_size, d1, d2, ...]
```
name: string (optional). The name scope for newly created ops.
Returns:
A control flow op that stores the new state of each entry into
the state saver. This op must be run for every iteration that
accesses data from the state saver (otherwise the state saver
will never progress through its states and run out of capacity).
Raises:
KeyError: if `state_name` does not match any of the initial states
declared in `initial_states`.
"""
if state_name not in self._state_saver._received_states.keys():
raise KeyError("state was not declared: %s" % state_name)
default_name = "InputQueueingStateSaver_SaveState"
with ops.name_scope(name, default_name, values=[value]):
# Place all operations on the CPU. Barriers and queues are only
# implemented for CPU, but all the other book-keeping operations
# (reshape, shape, range, ...) would be placed on GPUs if available,
# unless we explicitly tie them to CPU.
with ops.colocate_with(self._state_saver._capacity_queue.queue_ref):
indices_where_not_done = array_ops.reshape(
array_ops.where(
math_ops.logical_not(self._state_saver._sequence_is_done)),
[-1])
keeping_next_key = array_ops.gather(
self._state_saver._received_next_key, indices_where_not_done)
value = _check_shape(
array_ops.identity(
value, name="convert_%s" % state_name),
array_ops.shape(self._state_saver._received_states[state_name]))
keeping_state = array_ops.gather(value, indices_where_not_done)
return self._state_saver._barrier.insert_many(
self._state_saver._get_barrier_index("state", state_name),
keeping_next_key,
keeping_state,
name="BarrierInsertState_%s" % state_name)
# pylint: enable=protected-access
class SequenceQueueingStateSaver(object):
"""SequenceQueueingStateSaver provides access to stateful values from input.
This class is meant to be used instead of, e.g., a `Queue`, for splitting
variable-length sequence inputs into segments of sequences with fixed length
and batching them into mini-batches. It maintains contexts and state for a
sequence across the segments. It can be used in conjunction with a
`QueueRunner` (see the example below).
The `SequenceQueueingStateSaver` (SQSS) accepts one example at a time via the
inputs `input_length`, `input_key`, `input_sequences` (a dict),
`input_context` (a dict), and `initial_states` (a dict).
The sequences, values in `input_sequences`, may have variable first dimension
(the `padded_length`), though this dimension must always be a multiple of
`num_unroll`. All other dimensions must be fixed and accessible via
`get_shape` calls. The length prior to padding can be recorded in
`input_length`. The context values in `input_context` must all have fixed and
well defined dimensions. The initial state values must all have fixed and
well defined dimensions.
The SQSS splits the sequences of an input example into segments of length
`num_unroll`. Across examples minibatches of size `batch_size` are formed.
These minibatches contain a segment of the sequences, copy the context values,
and maintain state, length, and key information of the original input
examples. In the first segment of an example the state is still the initial
state. It can then be updated; and updated state values are accessible in
subsequent segments of the same example. After each segment
`batch.save_state()` must be called which is done by the state_saving_rnn.
Without this call, the dequeue op associated with the SQSS will not run.
Internally, SQSS has a queue for the input examples. Its `capacity` is
configurable. If set smaller than `batch_size` then the dequeue op will block
indefinitely. A small multiple of `batch_size` is a good rule of thumb to
prevent that queue from becoming a bottleneck and slowing down training.
If set too large (and note that it defaults to unbounded) memory consumption
goes up. Moreover, when iterating over the same input examples multiple times
reusing the same `key` the `capacity` must be smaller than the number of
examples.
The prefetcher, which reads one unrolled, variable-length input sequence at
a time, is accessible via `prefetch_op`. The underlying `Barrier` object
is accessible via `barrier`. Processed minibatches, as well as
state read and write capabilities are accessible via `next_batch`.
Specifically, `next_batch` provides access to all of the minibatched
data, including the following, see `NextQueuedSequenceBatch` for details:
* `total_length`, `length`, `insertion_index`, `key`, `next_key`,
* `sequence` (the index each minibatch entry's time segment index),
* `sequence_count` (the total time segment count for each minibatch entry),
* `context` (a dict of the copied minibatched context values),
* `sequences` (a dict of the split minibatched variable-length sequences),
* `state` (to access the states of the current segments of these entries)
* `save_state` (to save the states for the next segments of these entries)
Example usage:
```python
batch_size = 32
num_unroll = 20
lstm_size = 8
cell = tf.contrib.rnn.BasicLSTMCell(num_units=lstm_size)
initial_state_values = tf.zeros(cell.state_size, dtype=tf.float32)
raw_data = get_single_input_from_input_reader()
length, key, sequences, context = my_parser(raw_data)
assert "input" in sequences.keys()
assert "label" in context.keys()
  initial_states = {"lstm_state": initial_state_values}
stateful_reader = tf.SequenceQueueingStateSaver(
batch_size, num_unroll,
length=length, input_key=key, input_sequences=sequences,
input_context=context, initial_states=initial_states,
capacity=batch_size*100)
batch = stateful_reader.next_batch
inputs = batch.sequences["input"]
context_label = batch.context["label"]
inputs_by_time = tf.split(value=inputs, num_or_size_splits=num_unroll, axis=1)
assert len(inputs_by_time) == num_unroll
lstm_output, _ = tf.contrib.rnn.static_state_saving_rnn(
cell,
inputs_by_time,
state_saver=batch,
state_name="lstm_state")
# Start a prefetcher in the background
  session = tf.Session()
num_threads = 3
queue_runner = tf.train.QueueRunner(
stateful_reader, [stateful_reader.prefetch_op] * num_threads)
tf.train.add_queue_runner(queue_runner)
tf.train.start_queue_runners(sess=session)
while True:
# Step through batches, perform training or inference...
session.run([lstm_output])
```
**Note**: Usually the barrier is given to a QueueRunner as in the
examples above. The QueueRunner will close the barrier if the prefetch_op
receives an OutOfRange Error from upstream input queues (i.e., reaches
the end of the input). If the barrier is closed no further new examples
are added to the SQSS. The underlying barrier might, however, still
contain further unroll-steps of examples that have not undergone all
iterations. To gracefully finish all examples, the flag
`allow_small_batch` must be set to true, which causes the SQSS to issue
progressively smaller mini-batches with the remaining examples.
"""
def __init__(self,
batch_size,
num_unroll,
input_length,
input_key,
input_sequences,
input_context,
initial_states,
capacity=None,
allow_small_batch=False,
name=None):
"""Creates the SequenceQueueingStateSaver.
Args:
batch_size: int or int32 scalar `Tensor`, how large minibatches should
be when accessing the `state()` method and `context`, `sequences`, etc,
properties.
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length `k` are then split into `k / num_unroll`
many segments.
input_length: An int32 scalar `Tensor`, the length of the sequence prior
to padding. This value may be at most `padded_length` for any given
input (see below for the definition of `padded_length`).
Batched and total lengths of the current iteration are made accessible
via the `length` and `total_length` properties. The shape of
input_length (scalar) must be fully specified.
input_key: A string scalar `Tensor`, the **unique** key for the given
input. This is used to keep track of the split minibatch elements
of this input. Batched keys of the current iteration are made
accessible via the `key` property. The shape of `input_key` (scalar)
must be fully specified.
input_sequences: A dict mapping string names to `Tensor` values. The
values must all have matching first dimension, called `padded_length`.
The `SequenceQueueingStateSaver` will split these tensors along
this first dimension into minibatch elements of dimension
`num_unroll`. Batched and segmented sequences of the current iteration
are made accessible via the `sequences` property.
**Note**: `padded_length` may be dynamic, and may vary from input
to input, but must always be a multiple of `num_unroll`. The remainder
of the shape (other than the first dimension) must be fully specified.
input_context: A dict mapping string names to `Tensor` values. The values
are treated as "global" across all time splits of the given input,
and will be copied across for all minibatch elements accordingly.
Batched and copied context of the current iteration are made
accessible via the `context` property.
**Note**: All input_context values must have fully defined shapes.
initial_states: A dict mapping string state names to multi-dimensional
values (e.g. constants or tensors). This input defines the set of
states that will be kept track of during computing iterations, and
which can be accessed via the `state` and `save_state` methods.
**Note**: All initial_state values must have fully defined shapes.
capacity: The max capacity of the SQSS in number of examples. Needs to be
at least `batch_size`. Defaults to unbounded.
allow_small_batch: If true, the SQSS will return smaller batches when
there aren't enough input examples to fill a whole batch and the end of
the input has been reached (i.e., the underlying barrier has been
closed).
name: An op name string (optional).
Raises:
TypeError: if any of the inputs is not an expected type.
ValueError: if any of the input values is inconsistent, e.g. if
not enough shape information is available from inputs to build
the state saver.
"""
if capacity is not None and isinstance(batch_size, ops.Tensor):
with ops.control_dependencies([check_ops.assert_greater_equal(
math_ops.cast(capacity, dtype=dtypes.int64),
math_ops.cast(batch_size, dtype=dtypes.int64),
message="capacity needs to be >= batch_size.")]):
input_key = array_ops.identity(input_key)
elif capacity is not None and capacity < batch_size:
raise ValueError("capacity %d needs to be >= batch_size %d" % (
capacity, batch_size))
# The barrier is ignorant of the number of actual examples, since a long
# example that requires many iterations produces more elements in the
# barrier than a short example. Furthermore, we don't have an upper bound
# on the length of examples, and hence have to keep the capacity of the
# barrier at infinite to avoid dead-lock. Instead we have to keep track of
# the number of active examples in this class, and block the prefetch_op
# when capacity is reached. To this end, we employ a FIFOQueue in which we
# store one token (its value doesn't matter) for each input example, and
# dequeue a token for each completed example. Since the capacity of this
# queue is limited the enqueue operation will block if capacity is reached.
self._capacity_queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=[dtypes.int32], shapes=[[]])
# Place all operations on the CPU. Barriers and queues are only implemented
# for CPU, but all the other book-keeping operations
# (reshape, shape, range, ...) would be placed on GPUs if available,
# unless we explicitly tie them to CPU.
with ops.colocate_with(self._capacity_queue.queue_ref):
if not isinstance(initial_states, dict):
raise TypeError("initial_states must be a dictionary")
if not initial_states:
raise ValueError(
"initial_states may not be empty: at least one state variable is "
"required to properly enqueue split sequences to run in separate "
"iterations")
for k in initial_states:
if not isinstance(k, six.string_types):
raise TypeError("state name must be a string: %s" % k)
if ":" in k:
raise ValueError("state name may not have a colon: '%s'" % k)
op_vars = ([input_length, input_key] + list(input_sequences.values()) +
list(input_context.values()))
with ops.name_scope(name, "InputQueueingStateSaver", op_vars) as scope:
inputs = _SequenceInputWrapper(input_length, input_key, input_sequences,
input_context)
self._batch_size = batch_size
self._num_unroll = num_unroll
self._name = scope
# This step makes sure all shapes are well defined. We can now
# use get_shape() on any tensor in the output of this function
# and get a fully-defined shape.
(self._length, self._key, self._sorted_states, self._sorted_sequences,
self._sorted_context) = _prepare_sequence_inputs(inputs,
initial_states)
self._padded_length = array_ops.identity(
array_ops.shape(six.next(six.itervalues(self._sorted_sequences)))[
0],
name="padded_length") # The name is useful for debugging
self._padded_length = _check_multiple_of(self._padded_length,
self._num_unroll)
      # All sequences must share the same (padded) length along dimension 0.
self._sorted_sequences = collections.OrderedDict(
(k, _check_dimensions(
v, [0], [self._padded_length],
debug_prefix="sorted_sequences_%s" % k))
for k, v in self._sorted_sequences.items())
self._uninitialized_states = self._sorted_states
# Once this is set, self._get_barrier_*_index are available for use.
self._store_index_maps(self._sorted_sequences, self._sorted_context,
self._sorted_states)
# Make sure that the length is <= the padded_length
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.less_equal(self._length, self._padded_length), [
"Input length should be <= than length from sequences:",
self._length, " vs. ", self._padded_length
])
]):
self._length = array_ops.identity(self._length)
      # Only create the barrier; enqueue and dequeue operations happen when you
# access prefetch_op and next_batch.
self._create_barrier()
self._scope = scope
self._allow_small_batch = allow_small_batch
self._prefetch_op = None
self._next_batch = None
@property
def name(self):
return self._name
@property
def barrier(self):
return self._barrier
@property
def batch_size(self):
return self._batch_size
@property
def num_unroll(self):
return self._num_unroll
@property
def prefetch_op(self):
"""The op used to prefetch new data into the state saver.
Running it once enqueues one new input example into the state saver.
The first time this gets called, it additionally creates the prefetch_op.
Subsequent calls simply return the previously created `prefetch_op`.
It should be run in a separate thread via e.g. a `QueueRunner`.
Returns:
An `Operation` that performs prefetching.
"""
if not self._prefetch_op:
with ops.name_scope(None), ops.name_scope(
self._scope, values=[self._barrier.barrier_ref]):
self._create_prefetch_op()
return self._prefetch_op
@property
def next_batch(self):
"""The `NextQueuedSequenceBatch` providing access to batched output data.
Also provides access to the `state` and `save_state` methods.
The first time this gets called, it additionally prepares barrier reads
and creates `NextQueuedSequenceBatch` / next_batch objects. Subsequent
calls simply return the previously created `next_batch`.
In order to access data in `next_batch` without blocking, the `prefetch_op`
must have been run at least `batch_size` times (ideally in a separate
thread, or launched via a `QueueRunner`). After processing a segment in
`next_batch()`, `batch.save_state()` must be called which is done by the
state_saving_rnn. Without this call, the dequeue op associated with the SQSS
will not run.
Returns:
A cached `NextQueuedSequenceBatch` instance.
"""
# This is needed to prevent errors if next_batch is called before
# prefetch_op is created.
if not self._prefetch_op:
with ops.name_scope(None), ops.name_scope(
self._scope, values=[self._barrier.barrier_ref]):
self._create_prefetch_op()
if not self._next_batch:
with ops.name_scope(None), ops.name_scope(
self._scope, values=[self._barrier.barrier_ref]):
self._prepare_barrier_reads()
return self._next_batch
def close(self, cancel_pending_enqueues=False, name=None):
"""Closes the barrier and the FIFOQueue.
This operation signals that no more segments of new sequences will be
enqueued. New segments of already inserted sequences may still be enqueued
and dequeued if there is a sufficient number filling a batch or
allow_small_batch is true. Otherwise dequeue operations will fail
immediately.
Args:
cancel_pending_enqueues: (Optional.) A boolean, defaulting to
`False`. If `True`, all pending enqueues to the underlying queues will
be cancelled, and completing already started sequences is not possible.
name: Optional name for the op.
Returns:
The operation that closes the barrier and the FIFOQueue.
"""
with ops.name_scope(name, "SQSSClose", [self._prefetch_op]) as name:
barrier_close = self.barrier.close(cancel_pending_enqueues,
"BarrierClose")
fifo_queue_close = self._capacity_queue.close(cancel_pending_enqueues,
"FIFOClose")
return control_flow_ops.group(barrier_close, fifo_queue_close, name=name)
def _store_index_maps(self, sequences, context, states):
"""Prepares the internal dictionaries _name_to_index and _index_to_name.
These dictionaries are used to keep track of indices into the barrier.
Args:
sequences: `OrderedDict` of string, `Tensor` pairs.
context: `OrderedDict` of string, `Tensor` pairs.
states: `OrderedDict` of string, `Tensor` pairs.
"""
assert isinstance(sequences, dict)
assert isinstance(context, dict)
assert isinstance(states, dict)
self._name_to_index = dict(
(name, ix)
for (ix, name) in enumerate([
"__length", "__total_length", "__next_key", "__sequence",
"__sequence_count"
] + ["__sequence__%s" % k for k in sequences.keys()] + [
"__context__%s" % k for k in context.keys()
] + ["__state__%s" % k for k in states.keys()]))
self._index_to_name = [
name
for (name, _) in sorted(
self._name_to_index.items(), key=lambda n_ix: n_ix[1])
]
def _get_barrier_length_index(self):
return self._name_to_index["__length"]
def _get_barrier_total_length_index(self):
return self._name_to_index["__total_length"]
def _get_barrier_next_key_index(self):
return self._name_to_index["__next_key"]
def _get_barrier_sequence_index(self):
return self._name_to_index["__sequence"]
def _get_barrier_sequence_count_index(self):
return self._name_to_index["__sequence_count"]
def _get_barrier_index(self, index_type, name):
assert index_type in ("sequence", "context", "state")
key = "__%s__%s" % (index_type, name)
assert key in self._name_to_index, (
"Requested a name not in the value type %s: %s" % (index_type, name))
return self._name_to_index[key]
def _create_barrier(self):
"""Create the barrier.
This method initializes the Barrier object with the right types and shapes.
"""
# Create the barrier
sequence_dtypes = [v.dtype for k, v in self._sorted_sequences.items()]
context_dtypes = [v.dtype for k, v in self._sorted_context.items()]
state_dtypes = [v.dtype for k, v in self._sorted_states.items()]
types = ([
dtypes.int32, # length
dtypes.int32, # total_length
dtypes.string, # next_keys
dtypes.int32, # sequence
dtypes.int32
] # expanded_sequence_count
+ sequence_dtypes + context_dtypes + state_dtypes)
sequence_shapes = [
[self._num_unroll] + self._sorted_sequences[k].get_shape().as_list()[1:]
for k in self._sorted_sequences.keys()
]
context_shapes = [
self._sorted_context[k].get_shape().as_list()
for k in self._sorted_context.keys()
]
state_shapes = [
self._sorted_states[k].get_shape().as_list()
for k in self._sorted_states.keys()
]
shapes = ([
(), # length
(), # total_length
(), # next_keys
(), # sequence
()
] # expanded_sequence_count
+ sequence_shapes + context_shapes + state_shapes)
self._barrier = data_flow_ops.Barrier(types=types, shapes=shapes)
def _create_prefetch_op(self):
"""Group insert_many ops and create prefetch_op.
This method implements the "meat" of the logic underlying the
`SequenceQueueingStateSaver`. It performs dynamic reshaping of
sequences, copying of context, and initial insertion of these values,
as well as the key, next_key, sequence, sequence_count, and initial
states into the barrier.
"""
# Step 1: identify how many barrier entries to split this input
# into, store the result as a scalar
sequence_count = math_ops.div(self._padded_length, self._num_unroll)
sequence_count_vec = array_ops.expand_dims(sequence_count, 0)
# The final unrolled sequence's length is num_unroll only in
# the case that num_unroll divides it evenly.
ones = array_ops.ones(sequence_count_vec, dtype=dtypes.int32)
sequence = math_ops.range(sequence_count)
expanded_length = math_ops.maximum(
0, self._length - self._num_unroll * sequence)
expanded_length = math_ops.minimum(self._num_unroll, expanded_length)
expanded_total_length = self._length * ones
expanded_sequence_count = sequence_count * ones
current_keys = string_ops.string_join(
[
string_ops.as_string(
sequence, width=5, fill="0"), "_of_", string_ops.as_string(
sequence_count, width=5, fill="0"), ":", self._key
],
name="StringJoinCurrentKeys")
next_keys = array_ops.concat(
[
array_ops.slice(current_keys, [1], [-1]), array_ops.expand_dims(
string_ops.string_join(
["STOP:", self._key], name="StringJoinStop"),
0)
],
0,
name="concat_next_keys")
reshaped_sequences = collections.OrderedDict((
k,
_check_dimensions(
# Reshape sequences to sequence_count rows
array_ops.reshape(
v,
array_ops.concat(
[
array_ops.expand_dims(sequence_count, 0),
array_ops.expand_dims(self._num_unroll, 0),
v.get_shape().as_list()[1:]
],
0,
name="concat_sequences_%s" % k),
name="reshape_sequences_%s" % k),
[0, 1] + list(range(2, v.get_shape().ndims + 1)),
[sequence_count, self._num_unroll] + v.get_shape().as_list()[1:],
debug_prefix="reshaped_sequences_%s" %
k)) for k, v in self._sorted_sequences.items())
expanded_context = collections.OrderedDict(
(
k,
_check_dimensions(
# Copy context to be sequence_count rows
array_ops.tile(
array_ops.expand_dims(v, 0),
array_ops.concat(
[
array_ops.expand_dims(sequence_count, 0),
[1] * v.get_shape().ndims
],
0,
name="concat_context_%s" % k),
name="tile_context_%s" % k),
[0] + list(range(1, v.get_shape().ndims + 1)),
[sequence_count] + v.get_shape().as_list(),
debug_prefix="expanded_context_%s" % k))
for k, v in self._sorted_context.items())
# Storing into the barrier, for each current_key:
# sequence_ix, sequence_count, next_key, length,
# context... (copied), sequences... (truncated)
# Also storing into the barrier for the first key
# states (using initial_states).
insert_sequence_op = self._barrier.insert_many(
self._get_barrier_sequence_index(),
current_keys,
sequence,
name="BarrierInsertSequence")
insert_sequence_count_op = self._barrier.insert_many(
self._get_barrier_sequence_count_index(),
current_keys,
expanded_sequence_count,
name="BarrierInsertSequenceCount")
insert_next_key_op = self._barrier.insert_many(
self._get_barrier_next_key_index(),
current_keys,
next_keys,
name="BarrierInsertNextKey")
insert_length_op = self._barrier.insert_many(
self._get_barrier_length_index(),
current_keys,
expanded_length,
name="BarrierInsertLength")
insert_total_length_op = self._barrier.insert_many(
self._get_barrier_total_length_index(),
current_keys,
expanded_total_length,
name="BarrierInsertTotalLength")
insert_context_ops = dict((name, self._barrier.insert_many(
self._get_barrier_index("context", name),
current_keys,
value,
name="BarrierInsertContext_%s" % name))
for (name, value) in expanded_context.items())
insert_sequences_ops = dict((name, self._barrier.insert_many(
self._get_barrier_index("sequence", name),
current_keys,
value,
name="BarrierInsertSequences_%s" % name))
for (name, value) in reshaped_sequences.items())
# An op that blocks if we reached capacity in number of active examples.
TOKEN_WITH_IGNORED_VALUE = 21051976 # pylint: disable=invalid-name
insert_capacity_token_op = self._capacity_queue.enqueue(
(TOKEN_WITH_IGNORED_VALUE,))
# Insert just the initial state. Specifically force this to run
# the insert sequence op *first* so that the Barrier receives
# an insert with *all* the segments and the segments all get the same index.
with ops.control_dependencies(
[insert_sequence_op, insert_capacity_token_op]):
insert_initial_state_ops = dict(
(name, self._barrier.insert_many(
self._get_barrier_index("state", name),
array_ops.stack([current_keys[0]]),
array_ops.stack([value]),
name="BarrierInitialInsertState_%s" % name))
for (name, value) in self._uninitialized_states.items())
all_inserts = ([
insert_capacity_token_op, insert_sequence_op, insert_sequence_count_op,
insert_next_key_op, insert_length_op, insert_total_length_op
] + list(insert_initial_state_ops.values()) +
list(insert_context_ops.values()) +
list(insert_sequences_ops.values()))
self._prefetch_op = control_flow_ops.group(
*all_inserts, name="StateSaverPrefetchGroup")
def _prepare_barrier_reads(self):
"""Creates ops for reading the barrier, as used by properties like `length`.
"""
# Ops for reading from the barrier. These ops must be run in a
# different thread than the prefetcher op to avoid blocking.
received = self._barrier.take_many(
self._batch_size, self._allow_small_batch, name="BarrierTakeMany")
self._received_indices = received[0]
self._received_keys = received[1]
received_values = received[2]
self._received_sequence = received_values[self._get_barrier_sequence_index(
)]
self._received_sequence_count = received_values[
self._get_barrier_sequence_count_index()]
self._received_next_key = received_values[self._get_barrier_next_key_index(
)]
self._received_length = received_values[self._get_barrier_length_index()]
self._received_total_length = received_values[
self._get_barrier_total_length_index()]
self._received_context = collections.OrderedDict(
(name, received_values[self._get_barrier_index("context", name)])
for name in self._sorted_context.keys())
self._received_sequences = collections.OrderedDict(
(name, received_values[self._get_barrier_index("sequence", name)])
for name in self._sorted_sequences.keys())
self._received_batch_size = array_ops.squeeze(
array_ops.shape(self._received_length))
# Which examples are we done with?
self._sequence_is_done = (
self._received_sequence + 1 >= self._received_sequence_count)
# Compute the number of finished sequences and dequeue as many tokens from
# the capacity queue.
finished_sequences = (math_ops.reduce_sum(
math_ops.cast(self._sequence_is_done, dtypes.int32)))
# TODO(ebrevdo): convert to dequeue_up_to when FIFOQueue supports it.
dequeue_op = self._capacity_queue.dequeue_many(finished_sequences)
# Tie the dequeue_op to the received_state, such that it is definitely
# carried out.
with ops.control_dependencies([dequeue_op]):
self._received_states = collections.OrderedDict(
(name, array_ops.identity(received_values[self._get_barrier_index(
"state", name)])) for name in self._sorted_states.keys())
self._next_batch = NextQueuedSequenceBatch(self)
def batch_sequences_with_states(input_key,
input_sequences,
input_context,
input_length,
initial_states,
num_unroll,
batch_size,
num_threads=3,
capacity=1000,
allow_small_batch=True,
pad=True,
make_keys_unique=False,
make_keys_unique_seed=None,
name=None):
"""Creates batches of segments of sequential input.
This method creates a `SequenceQueueingStateSaver` (SQSS) and adds it to
the queuerunners. It returns a `NextQueuedSequenceBatch`.
It accepts one example at a time identified by a unique `input_key`.
  `input_sequences` is a dict with values that are tensors with time as first
  dimension. This time dimension must be the same across those tensors of an
  example, but it can vary across examples. It always has to be a multiple of
  `num_unroll`, so padding may be necessary; padding is turned on by default
  via `pad=True`.
  `input_length` is a scalar `Tensor` or an int recording the time dimension
  prior to padding. It should be between 0 and the time dimension. One reason
  to keep track of it is so that it can be taken into account when computing
  the loss. If `pad=True` then `input_length` can be `None` and will be
  inferred.
  This method segments `input_sequences` into segments of length `num_unroll`.
  It batches input sequences from `batch_size` many examples. These mini-batches
  are available through the `sequence` property of the output. Moreover, for
  each entry in the batch we can access its original `input_key` in `key` and
  its input length in `total_length`. `length` records how many non-padded time
  steps there are within the current segment.
Static features of an example that do not vary across time can be part of the
`input_context`, a dict with Tensor values. This method copies the context for
each segment and makes it available in the `context` of the output.
  This method can maintain and update a state for each example. It accepts
  `initial_states` as a dict with `Tensor` values. For the first mini-batch that
  contains an example, `state` returns the corresponding entry of
  `initial_states`. If `save_state` is called, the next segment of that example
  will see the updated entry in `state`.
See `NextQueuedSequenceBatch` for a complete list of properties and methods.
Example usage:
```python
batch_size = 32
num_unroll = 20
num_enqueue_threads = 3
lstm_size = 8
cell = tf.contrib.rnn.BasicLSTMCell(num_units=lstm_size)
key, sequences, context = my_parser(raw_data)
initial_state_values = tf.zeros((state_size,), dtype=tf.float32)
initial_states = {"lstm_state": initial_state_values}
  batch = tf.contrib.training.batch_sequences_with_states(
input_key=key,
input_sequences=sequences,
input_context=context,
input_length=tf.shape(sequences["input"])[0],
initial_states=initial_states,
num_unroll=num_unroll,
batch_size=batch_size,
num_threads=num_enqueue_threads,
capacity=batch_size * num_enqueue_threads * 2)
inputs = batch.sequences["input"]
context_label = batch.context["label"]
inputs_by_time = tf.split(value=inputs, num_or_size_splits=num_unroll, axis=1)
assert len(inputs_by_time) == num_unroll
lstm_output, _ = tf.contrib.rnn.static_state_saving_rnn(
cell,
inputs_by_time,
state_saver=batch,
state_name="lstm_state")
# Start a prefetcher in the background
  session = tf.Session()
  tf.train.start_queue_runners(sess=session)
while True:
# Step through batches, perform training or inference...
session.run([lstm_output])
```
Args:
input_key: A string scalar `Tensor`, the **unique** key for the given
input example. This is used to keep track of the split minibatch elements
of this input. Batched keys of the current iteration are made
accessible via the `key` property. The shape of `input_key` (scalar) must
be fully specified. Consider setting `make_keys_unique` to True when
iterating over the same input multiple times.
**Note**: if `make_keys_unique=False` then `input_key`s must be unique.
    input_sequences: A dict mapping string names to `Tensor` values. The values
      must all have a matching first dimension, called `value_length`, which may
      vary from input to input. The remainder of the shape (other than the first
      dimension) must be fully specified.
      The `SequenceQueueingStateSaver` will split these tensors along
      this first dimension into minibatch elements of dimension `num_unroll`.
Batched and segmented sequences of the current iteration are made
accessible via the `sequences` property.
**Note**: if `pad=False`, then `value_length` must always be a multiple
of `num_unroll`.
input_context: A dict mapping string names to `Tensor` values. The values
are treated as "global" across all time splits of the given input example,
and will be copied across for all minibatch elements accordingly.
Batched and copied context of the current iteration are made
accessible via the `context` property.
**Note**: All input_context values must have fully defined shapes.
input_length: None or an int32 scalar `Tensor`, the length of the sequence
prior to padding. If `input_length=None` and `pad=True` then the length
will be inferred and will be equal to `value_length`. If `pad=False` then
      `input_length` cannot be `None` and must be specified. Its shape (scalar)
      must be fully specified. Its value may be
at most `value_length` for any given input (see above for the definition
of `value_length`). Batched and total lengths of the current iteration are
made accessible via the `length` and `total_length` properties.
initial_states: A dict mapping string state names to multi-dimensional
values (e.g. constants or tensors). This input defines the set of
      states that will be kept track of across iterations, and
which can be accessed via the `state` and `save_state` methods.
**Note**: All initial_state values must have fully defined shapes.
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length k are then split into k / num_unroll many
segments.
batch_size: int or int32 scalar `Tensor`, how large minibatches should
be when accessing the `state()` method and `context`, `sequences`, etc,
properties.
num_threads: The int number of threads enqueuing input examples into a
queue.
capacity: The max capacity of the queue in number of examples. Needs to be
at least `batch_size`. Defaults to 1000. When iterating over the same
input example multiple times reusing their keys the `capacity` must be
smaller than the number of examples.
allow_small_batch: If true, the queue will return smaller batches when
there aren't enough input examples to fill a whole batch and the end of
the input has been reached.
    pad: If `True`, `input_sequences` will be padded to a multiple of
      `num_unroll`. In that case `input_length` may be `None` and is assumed to
      be the length of the first dimension of values in `input_sequences`
(i.e. `value_length`).
make_keys_unique: Whether to append a random integer to the `input_key` in
an effort to make it unique. The seed can be set via
`make_keys_unique_seed`.
make_keys_unique_seed: If `make_keys_unique=True` this fixes the seed with
which a random postfix is generated.
name: An op name string (optional).
Returns:
A NextQueuedSequenceBatch with segmented and batched inputs and their
states.
Raises:
TypeError: if any of the inputs is not an expected type.
ValueError: if any of the input values is inconsistent, e.g. if
not enough shape information is available from inputs to build
the state saver.
"""
tensor_list = (list(input_sequences.values()) + list(input_context.values()) +
list(initial_states.values()))
with ops.name_scope(name, "batch_sequences_with_states", tensor_list) as name:
if pad:
length, input_sequences = _padding(input_sequences, num_unroll)
input_length = input_length if input_length is not None else length
elif input_sequences:
# Assert that value_length is a multiple of num_unroll.
checked_input_sequences = {}
for key, value in input_sequences.items():
if (isinstance(value, sparse_tensor.SparseTensor) or
isinstance(value, sparse_tensor.SparseTensorValue)):
value_length = value.dense_shape[0]
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_and(
math_ops.equal(value_length % num_unroll, 0),
math_ops.not_equal(value_length, 0)),
[
string_ops.string_join([
"SparseTensor %s first dimension should be a "
"multiple of: " % key,
string_ops.as_string(num_unroll),
", but saw value: ",
string_ops.as_string(value_length),
". Consider setting pad=True."])])]):
checked_input_sequences[key] = sparse_tensor.SparseTensor(
indices=array_ops.identity(
value.indices, name="multiple_of_checked"),
values=array_ops.identity(
value.values, name="multiple_of_checked"),
dense_shape=array_ops.identity(
value.dense_shape, name="multiple_of_checked"))
else:
if not isinstance(value, ops.Tensor):
try:
value = ops.convert_to_tensor(value)
except TypeError:
raise TypeError(
"Unsupported input_sequences expected Tensor or SparseTensor "
"values, got: %s for key %s" % (str(type(value)), key))
value_length = array_ops.shape(value)[0]
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_and(
math_ops.equal(value_length % num_unroll, 0),
math_ops.not_equal(value_length, 0)),
[
string_ops.string_join([
"Tensor %s first dimension should be a multiple "
"of: " % key,
string_ops.as_string(num_unroll),
", but saw value: ",
string_ops.as_string(value_length),
". Consider setting pad=True."
])
])
]):
checked_input_sequences[key] = array_ops.identity(
value, name="multiple_of_checked")
input_sequences = checked_input_sequences
# Move SparseTensors in context into input_sequences.
_move_sparse_tensor_out_context(input_context, input_sequences, num_unroll)
# Deconstruct SparseTensors in sequence into a dense Tensor before inputting
# to SQSS.
(transformed_input_seq,
sparse_tensor_keys,
tensor_list) = _deconstruct_sparse_tensor_seq(input_sequences)
if make_keys_unique:
input_key = string_ops.string_join([
input_key,
string_ops.as_string(
random_ops.random_uniform(
(), minval=0, maxval=100000000, dtype=dtypes.int32,
seed=make_keys_unique_seed))])
# setup stateful queue reader
stateful_reader = SequenceQueueingStateSaver(
batch_size,
num_unroll,
input_length=input_length,
input_key=input_key,
input_sequences=transformed_input_seq,
input_context=input_context,
initial_states=initial_states,
capacity=capacity,
allow_small_batch=allow_small_batch)
barrier = stateful_reader.barrier
summary.scalar("queue/%s/ready_segment_batches_" % barrier.name,
math_ops.cast(barrier.ready_size(), dtypes.float32))
q_runner = queue_runner.QueueRunner(
stateful_reader, [stateful_reader.prefetch_op] * num_threads,
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError))
queue_runner.add_queue_runner(q_runner)
batch = stateful_reader.next_batch
# Reconstruct SparseTensors in sequence.
_reconstruct_sparse_tensor_seq(
batch.sequences,
sparse_tensor_keys,
tensor_list,
batch_size,
num_unroll)
# Move select SparseTensors back to context.
_move_sparse_tensor_in_context(batch.context, batch.sequences)
return batch
def _padding(sequences, num_unroll):
"""For a dictionary of sequences, pads tensors to a multiple of `num_unroll`.
Args:
sequences: dictionary with `Tensor` values.
num_unroll: int specifying to what multiple to pad sequences to.
Returns:
    length: Scalar `Tensor`, the size of the first (time) dimension shared by
      all the values in `sequences` (before padding).
    padded_sequences: Dictionary of sequences that are padded to a multiple of
      `num_unroll`.
Raises:
    ValueError: If `num_unroll` is not an int.
    TypeError: If `sequences` is not a dictionary with string keys.
"""
if not isinstance(num_unroll, numbers.Integral):
raise ValueError("Unsupported num_unroll expected int, got: %s" %
str(num_unroll))
if not isinstance(sequences, dict):
raise TypeError("Unsupported sequences expected dict, got: %s" %
str(sequences))
for key, value in sequences.items():
if not isinstance(key, six.string_types):
raise TypeError("Unsupported sequences key expected string, got: %s" %
str(key))
if not sequences:
return 0, {}
sequences_dict = {}
for key, value in sequences.items():
if not (isinstance(value, sparse_tensor.SparseTensor) or
isinstance(value, sparse_tensor.SparseTensorValue)):
sequences_dict[key] = ops.convert_to_tensor(value)
else:
sequences_dict[key] = value
lengths = [array_ops.shape(value)[0] for value in sequences_dict.values()
if isinstance(value, ops.Tensor)]
if lengths:
length = lengths[0]
all_lengths_equal = [
control_flow_ops.Assert(
math_ops.equal(l, length), [string_ops.string_join(
["All sequence lengths must match, but received lengths: ",
string_ops.as_string(lengths)])])
for l in lengths]
length = control_flow_ops.with_dependencies(all_lengths_equal, length)
else: # Only have SparseTensors
sparse_lengths = [value.dense_shape[0] for value in sequences_dict.values()
if isinstance(value, sparse_tensor.SparseTensor)]
    # `math_ops.maximum` is elementwise over two arguments; use reduce_max to
    # take the largest first-dimension size across the SparseTensor values.
    length = math_ops.reduce_max(sparse_lengths)
unroll = array_ops.constant(num_unroll)
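  # Round `length` up to the next multiple of `num_unroll`. For example, with
  # length=7 and num_unroll=3: 7 + ((3 - (7 % 3)) % 3) = 9; a length that is
  # already a multiple (e.g. 6) stays unchanged.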
padded_length = length + ((unroll - (length % unroll)) % unroll)
padded_sequences = {}
for key, value in sequences_dict.items():
if isinstance(value, ops.Tensor):
# 1. create shape of paddings
# first dimension of value will be increased by num_paddings to
# padded_length
num_paddings = [padded_length - array_ops.shape(value)[0]]
# the shape of the paddings that we concat with the original value will be
# [num_paddings, tf.shape(value)[1], tf.shape(value)[2], ...,
# tf.shape(value)[tf.rank(value) - 1])]
padding_shape = array_ops.concat(
(num_paddings, array_ops.shape(value)[1:]), 0)
# 2. fill padding shape with dummies
dummy = array_ops.constant(
"" if value.dtype == dtypes.string else 0, dtype=value.dtype)
paddings = array_ops.fill(dims=padding_shape, value=dummy)
# 3. concat values with paddings
padded_sequences[key] = array_ops.concat([value, paddings], 0)
else:
padded_shape = array_ops.concat([[math_ops.to_int64(padded_length)],
value.dense_shape[1:]], 0)
padded_sequences[key] = sparse_tensor.SparseTensor(
indices=value.indices,
values=value.values,
dense_shape=padded_shape)
return length, padded_sequences
_SPARSE_CONTEXT_PREFIX_KEY = "_context_in_seq_"
def _move_sparse_tensor_out_context(input_context, input_sequences, num_unroll):
"""Moves `SparseTensor`s from `input_context` into `input_sequences` as seq.
  For `key, value` pairs in `input_context` with a `SparseTensor` `value`, this
  removes them from `input_context`, transforms the `value` into a sequence, and
  adds the `key` with the transformed `value` to `input_sequences`.
  The transformation is done by adding a new first dimension of `value_length`,
  equal to that of the other values in `input_sequences`, and tiling the `value`
  every `num_unroll` steps.
Args:
input_context: dictionary with `Tensor` or `SparseTensor` values. To be
modified to take out `SparseTensor` values.
input_sequences: dictionary with `Tensor` or `SparseTensor` values. To be
modified to add transformed `SparseTensor` values from `input_context`.
    num_unroll: int, the tiling interval; the `value` is repeated every
      `num_unroll` steps along the new first dimension.
"""
value_length = array_ops.constant(1)
if input_sequences:
seq = list(input_sequences.values())[0]
if isinstance(seq, ops.Tensor):
value_length = array_ops.shape(seq)[0]
else:
value_length = seq.dense_shape[0]
value_length = math_ops.cast(value_length, dtype=dtypes.int64)
def _copy_sparse_tensor(sp_tensor):
"""Operation to tile a sparse tensor along a newly added 0 dimension.
Adding a new first dimension of `value_length` and tiling the `sp_tensor`
every `num_unroll` steps.
Args:
sp_tensor: `SparseTensor`.
Returns:
`SparseTensor` sequence with `sp_tensor` tiled.
"""
n = value_length // num_unroll
n = math_ops.cast(n, dtype=dtypes.int32)
values = array_ops.tile(sp_tensor.values, array_ops.expand_dims(n, 0))
shape = array_ops.concat(
[array_ops.expand_dims(value_length, 0), sp_tensor.dense_shape], 0)
# Construct new indices by multiplying old ones and prepending [0, n).
# First multiply indices n times along a newly created 0-dimension.
multiplied_indices = array_ops.tile(
array_ops.expand_dims(sp_tensor.indices, 0),
array_ops.stack([n, 1, 1]))
# Construct indicator for [0, n).
# [ [ [0] [0] ... [0] ]
# [ [num_unroll] [num_unroll] ... [num_unroll] ]
# ...
# [ [num_unroll*(n-1)] [num_unroll*(n-1)] ... [num_unroll*(n-1)] ] ]
# of shape [n, shape(sp_tensor.indices)[0], 1]
# Get current dimensions of indices.
dim0 = array_ops.shape(sp_tensor.indices)[0]
dim1 = array_ops.shape(sp_tensor.indices)[1]
ind = math_ops.range(start=0, limit=value_length, delta=num_unroll)
# ind.set_shape([n])
ind = array_ops.expand_dims(ind, 1)
ind = array_ops.expand_dims(ind, 2)
ind = array_ops.tile(ind, [1, dim0, 1])
# Concatenate both and reshape.
indices = array_ops.concat([ind, multiplied_indices], 2)
indices = array_ops.reshape(indices, [dim0 * n, dim1 + 1])
return sparse_tensor.SparseTensor(indices=indices,
values=values,
dense_shape=shape)
sparse_tensor_keys = [
k for k in sorted(input_context.keys())
if (isinstance(input_context[k], sparse_tensor.SparseTensor) or
isinstance(input_context[k], sparse_tensor.SparseTensorValue))]
for key in sparse_tensor_keys:
input_sequences[_SPARSE_CONTEXT_PREFIX_KEY + key] = _copy_sparse_tensor(
input_context[key])
del input_context[key]
def _move_sparse_tensor_in_context(context, sequences):
sparse_tensor_keys = [
k for k in sorted(sequences) if k.startswith(_SPARSE_CONTEXT_PREFIX_KEY)]
for key in sparse_tensor_keys:
new_key = key[len(_SPARSE_CONTEXT_PREFIX_KEY):]
sp_tensor = sequences[key]
# Take out time dimension.
sp_tensor = sparse_tensor.SparseTensor(
sp_tensor.indices, # with only 0s at column 1 representing time.
sp_tensor.values,
array_ops.concat(
[[sp_tensor.dense_shape[0]], # batch
[1], # time
sp_tensor.dense_shape[2:]], # SparseTensor shape prior to batching
0))
new_shape = array_ops.concat(
[[sp_tensor.dense_shape[0]], sp_tensor.dense_shape[2:]], 0)
context[new_key] = sparse_ops.sparse_reshape(sp_tensor, new_shape)
del sequences[key]
def _deconstruct_sparse_tensor_seq(input_sequence, shared_name=None):
"""Converts `SparseTensor` values into `Tensors` of IDs and meta data.
Given a dict of keys -> `Tensor` or `SparseTensor` transforms the
`SparseTensor` values into `Tensor` values of IDs by calling `_store_sparse`.
  The IDs are pointers into an underlying `SparseTensorsMap` that is being
constructed. Additional meta data is returned in order to be able to
reconstruct `SparseTensor` values after batching and segmenting the IDs
`Tensor`.
Args:
input_sequence: dictionary with `Tensor` or `SparseTensor` values.
shared_name: The shared name for the underlying `SparseTensorsMap`
(optional, defaults to the name of the newly created op).
Returns:
    A tuple `(sequence, sparse_tensor_keys, tensor_op_list)` where `sequence` is a
    dictionary with the same keys as `input_sequence` but only `Tensor` values,
    `sparse_tensor_keys` is a list of the keys of the `SparseTensor` values that
    were converted, and `tensor_op_list` is a list of the same length containing
    the `Operation`s that produce those handle `Tensor`s.
"""
sparse_tensor_keys = [
k for k in sorted(input_sequence.keys())
if (isinstance(input_sequence[k], sparse_tensor.SparseTensor) or
isinstance(input_sequence[k], sparse_tensor.SparseTensorValue))]
if not sparse_tensor_keys:
return input_sequence, None, sparse_tensor_keys
sparse_tensor_list = [input_sequence[k] for k in sparse_tensor_keys]
tensor_list = [_store_sparse(sp_tensor, shared_name=shared_name)
for sp_tensor in sparse_tensor_list]
transformed_input_seq = dict(input_sequence)
tensor_op_list = []
for i, k in enumerate(sparse_tensor_keys):
transformed_input_seq[k] = tensor_list[i]
tensor_op_list += [tensor_list[i].op]
return transformed_input_seq, sparse_tensor_keys, tensor_op_list
def _reconstruct_sparse_tensor_seq(sequence,
sparse_tensor_keys,
tensor_op_list,
batch_size,
num_unroll):
"""Inverse of _deconstruct_sparse_tensor_seq.
Given a dict of keys -> `Tensor` reconstructs `SparseTensor` values for keys
in `sparse_tensor_keys`. Their `Tensor` values are assumed to be IDs into the
underlying `SparseTensorsMap`. The `dense_shape` of the `SparseTensor`s is
`[batch_size, num_unroll, d_0, d_1, ..., d_n]` when the original
`SparseTensor` that got deconstructed with `_deconstruct_sparse_tensor_seq`
has a `dense_shape` of `[None, d_0, d_1, ..., d_n]`.
Args:
sequence: dictionary with only `Tensor` values that is being updated.
sparse_tensor_keys: list of the keys present in `sequence` identifying
`SparseTensor` values that should be reconstructed.
tensor_op_list: list of the same length as `sparse_tensor_keys` with
`Tensor` objects.
batch_size: int or int32 scalar `Tensor`, how large minibatches should
be.
num_unroll: Python integer, how many time steps were unrolled at a time.
"""
def _flatten_tensor(tensor):
"""Flattens `Tensor` of `shape [batch_size, num_unroll]` into 1D `Tensor`.
The main use of this function is to work around the limitation of
`_restore_sparse` to only accept 1D handles.
Args:
tensor: 2D `Tensor` of `shape [batch_size, num_unroll]`
Returns:
1D `Tensor`.
"""
return array_ops.reshape(tensor, [-1])
def _unflatten_sparse_tensor(sp_tensor):
"""Recreates `[batch_size, num_unroll]` dimensions in the `SparseTensor`.
Counter-part of `_flatten_tensor` which is called on the input of
`_restore_sparse` while this method is called on the output of it.
Together they work around the limitation of `_restore_sparse` to only
accept 1D handles.
The `indices` in `sp_tensor` is a 2D `Tensor` of `shape [N, ndims]`, where
`N` is the number of `values` and `ndims` is the number of dimension in its
dense counterpart. Among `ndims` the first entry corresponds to the batch
dimension `[0, num_unroll * batch_size)` from which we need to recreate the
2 dimensions `batch_size` and `num_unroll`.
The reason this reconstruction works is because the output of
`_restore_sparse` despite being a `SparseTensor` is actually dense w.r.t.
that first entry.
Args:
sp_tensor: A SparseTensor.
Returns:
A SparseTensor with a +1 higher rank than the input.
"""
idx_batch = math_ops.to_int64(
math_ops.floor(sp_tensor.indices[:, 0] / num_unroll))
idx_time = math_ops.mod(sp_tensor.indices[:, 0], num_unroll)
indices = array_ops.concat(
[
array_ops.expand_dims(idx_batch, 1),
array_ops.expand_dims(idx_time, 1), sp_tensor.indices[:, 1:]
],
axis=1)
dense_shape = array_ops.concat(
[[math_ops.cast(batch_size, dtype=dtypes.int64)],
[math_ops.cast(num_unroll, dtype=dtypes.int64)],
sp_tensor.dense_shape[1:]], axis=0)
return sparse_tensor.SparseTensor(
indices=indices,
values=sp_tensor.values,
dense_shape=dense_shape)
if not sparse_tensor_keys:
return
tensor_list = [sequence[k] for k in sparse_tensor_keys]
sp_tensors = [
_restore_sparse(sparse_map_op=i,
# Flatten the 2D Tensor [batch_size, num_unroll] of
# handles to a 1D Tensor.
# Reconstruct the dimensions later.
# TODO(b/34247140): Remove this workaround.
sparse_handles=_flatten_tensor(s), rank=None)
for i, s in zip(tensor_op_list, tensor_list)]
num_unroll = ops.convert_to_tensor(num_unroll, dtype=dtypes.int64,
name="num_unroll_int64")
# Recreate the [batch_size, num_unroll] dimensions in the SparseTensors.
# The dense_shape will have a +1 higher rank.
# TODO(b/34247140): Remove this workaround.
sp_tensors_higher_dim = [_unflatten_sparse_tensor(s) for s in sp_tensors]
# Set values to SparseTensors for sparse_tensor_keys.
for i, key in enumerate(sparse_tensor_keys):
sequence[key] = sp_tensors_higher_dim[i]
return
|
raymondxyang/tensorflow
|
tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
|
Python
|
apache-2.0
| 78,772
|
# ----------------------------------------------------------------------------
# Copyright 2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# ------------------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see fast-rcnn/LICENSE for details]
# Written by Ross Girshick
# ------------------------------------------------------------------
from pycuda.compiler import SourceModule
from pycuda.tools import context_dependent_memoize
"""
CUDA kernels for ROI pooling layers.
There is a fprop function and a bprop function.
The fprop and bprop CUDA-C code are adapted from Fast R-CNN model.
Each of the kernels uses templating to perform %(type)
conversion so it works for all data types (currently fp32 and fp16 are supported).
"""
def map_string2func(funcname, clss):
"""
Helper function that converts string function names to function calls
"""
if funcname == "fprop_roipooling":
return _get_fprop_roipooling(clss)
if funcname == "bprop_roipooling":
return _get_bprop_roipooling(clss)
raise AttributeError("kernel type '" + funcname + "' not understood")
# This section of the code contains templated CUDA-C code for the kernels.
@context_dependent_memoize
def _get_fprop_roipooling(clss):
code = r"""
#define FLT_MAX 3.402823466E+38F
__global__ void fprop_roipooling(const int nthreads,
const int num_rois, const int img_count,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const float* bottom_data, const float* bottom_rois, float* top_data,
int* argmax_data, const float spatial_scale) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; \
index < (nthreads); index += blockDim.x * gridDim.x){
// (c, ph, pw, n) is an element in the pooled output
int n = index % num_rois;
int pw = (index / num_rois) % pooled_width;
int ph = (index / num_rois / pooled_width) % pooled_height;
int c = index / num_rois / pooled_width / pooled_height;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
float bin_size_h = static_cast<float>(roi_height)
/ static_cast<float>(pooled_height);
float bin_size_w = static_cast<float>(roi_width)
/ static_cast<float>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<float>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<float>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<float>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<float>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
float maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
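        // bottom_data is laid out channel-major as (C, H, W, N); advance to the
        // start of channel c before scanning this ROI bin for its maximum.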
bottom_data += c * height * width * img_count;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width * img_count + w * img_count + roi_batch_ind;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
        // Note: maxidx (from bottom_index) is an offset into the (h, w, img_count)
        // layout of one channel of the feature map, so its maximum value is H*W*N.
}
}
"""
module = SourceModule(code)
kernel = module.get_function("fprop_roipooling")
sig = "8I 4P 1f"
kernel.prepare(sig)
return kernel
# This section of the code contains templated CUDA-C code for the kernels.
@context_dependent_memoize
def _get_bprop_roipooling(clss):
code = r"""
__global__ void bprop_roipooling(const int nthreads,
const int num_rois, const int img_count,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const float* top_diff, const float* bottom_rois, float* bottom_diff,
const int* argmax_data, const float spatial_scale) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; \
index < (nthreads); index += blockDim.x * gridDim.x){
// (c, h, w, n) coords in bottom data on feature map
int n = index % img_count;
int w = (index / img_count) % width;
int h = (index / img_count / width) % height;
        int c = index / img_count / width / height;
float gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const float* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = c * pooled_height * pooled_width * num_rois;
const float* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
float bin_size_h = static_cast<float>(roi_height)
/ static_cast<float>(pooled_height);
float bin_size_w = static_cast<float>(roi_width)
/ static_cast<float>(pooled_width);
int phstart = floor(static_cast<float>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<float>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<float>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<float>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int top_index = ph * pooled_width * num_rois + pw * num_rois + roi_n;
int bottom_index = h * width * img_count + w * img_count + roi_batch_ind;
if (offset_argmax_data[top_index] == bottom_index) {
gradient += offset_top_diff[top_index];
}
}
}
}
bottom_diff[index] = gradient;
}
}
"""
module = SourceModule(code)
kernel = module.get_function("bprop_roipooling")
sig = "8I 4P 1f"
kernel.prepare(sig)
return kernel
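# --- Editor's illustrative sketch (not part of the original backend) ---
# A minimal example of how the prepared fprop kernel above could be launched with
# pycuda, assuming a CUDA context is already active (e.g. via pycuda.autoinit),
# the feature map is a gpuarray laid out as (C, H, W, N), and the ROIs are a
# (num_rois, 5) gpuarray. All names, shapes and the grid/block choice here are
# assumptions for illustration; the real neon backend wires these up itself.
def _example_fprop_launch(feature_map, rois, output, argmax,
                          num_rois, img_count, channels, height, width,
                          pooled_height, pooled_width, spatial_scale):
    import numpy as np
    kernel = _get_fprop_roipooling(None)
    # One output element per (channel, pooled_h, pooled_w, roi).
    nthreads = channels * pooled_height * pooled_width * num_rois
    block = (1024, 1, 1)
    grid = ((nthreads + block[0] - 1) // block[0], 1, 1)
    # Arguments follow the prepared signature "8I 4P 1f".
    kernel.prepared_call(
        grid, block,
        nthreads, num_rois, img_count, channels, height, width,
        pooled_height, pooled_width,
        feature_map.gpudata, rois.gpudata, output.gpudata, argmax.gpudata,
        np.float32(spatial_scale))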
|
coufon/neon-distributed
|
neon/backends/kernels/cuda/roipooling.py
|
Python
|
apache-2.0
| 9,019
|
#!/router/bin/python
import outer_packages
#from trex_stl_lib.trex_stl_hltapi import CTRexHltApi, CStreamsPerPort
from trex_stl_lib.trex_stl_hltapi import *
import traceback
import sys, time
from pprint import pprint
import argparse
def error(err = None):
if not err:
        raise Exception('Unknown exception, see traceback')
if type(err) is str and not err.startswith('[ERR]'):
err = '[ERR] ' + err
print err
sys.exit(1)
def check_res(res):
if res['status'] == 0:
error('Encountered error:\n%s' % res['log'])
return res
def print_brief_stats(res):
title_str = ' '*3
tx_str = 'TX:'
rx_str = 'RX:'
for port_id, stat in res.iteritems():
if type(port_id) is not int:
continue
title_str += ' '*10 + 'Port%s' % port_id
tx_str += '%15s' % res[port_id]['aggregate']['tx']['total_pkts']
rx_str += '%15s' % res[port_id]['aggregate']['rx']['total_pkts']
print(title_str)
print(tx_str)
print(rx_str)
def wait_with_progress(seconds):
for i in range(0, seconds):
time.sleep(1)
sys.stdout.write('.')
sys.stdout.flush()
print('')
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser(description='Example of using stateless TRex via HLT API.', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-v', dest = 'verbose', default = 0, help='Stateless API verbosity:\n0: No prints\n1: Commands and their status\n2: Same as 1 + ZMQ in&out')
parser.add_argument('--device', dest = 'device', default = 'localhost', help='Address of TRex server')
args = parser.parse_args()
hlt_client = CTRexHltApi(verbose = int(args.verbose))
print('Connecting to %s...' % args.device)
res = check_res(hlt_client.connect(device = args.device, port_list = [0, 1], username = 'danklei', break_locks = True, reset = True))
port_handle = res['port_handle']
print('Connected, got port handles %s' % port_handle)
ports_streams_dict = CStreamsPerPort()
print hlt_client.traffic_control(action = 'poll')
print hlt_client.traffic_config(mode = 'create', l2_encap = 'ethernet_ii_vlan', rate_pps = 1,
l3_protocol = 'ipv4',
#length_mode = 'imix', l3_length = 200,
ipv6_dst_mode = 'decrement', ipv6_dst_count = 300, ipv6_dst_addr = 'fe80:0:0:0:0:0:0:000f',
port_handle = port_handle, port_handle2 = port_handle[1],
#save_to_yaml = '/tmp/d1.yaml',
#stream_id = 1,
)
print hlt_client.traffic_control(action = 'poll')
print hlt_client.traffic_control(action = 'run')
print hlt_client.traffic_control(action = 'poll')
wait_with_progress(2)
print hlt_client.traffic_control(action = 'poll')
print hlt_client.traffic_control(action = 'stop')
print hlt_client.traffic_control(action = 'poll')
print hlt_client.traffic_stats(mode = 'aggregate')
print hlt_client.traffic_control(action = 'clear_stats')
wait_with_progress(1)
print hlt_client.traffic_stats(mode = 'aggregate')
wait_with_progress(1)
print hlt_client.traffic_stats(mode = 'aggregate')
wait_with_progress(1)
print hlt_client.traffic_stats(mode = 'aggregate')
wait_with_progress(1)
print hlt_client.traffic_stats(mode = 'aggregate')
#print res
#print hlt_client._streams_history
#print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
#print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[1])
#ports_streams_dict.add_streams_from_res(res)
sys.exit(0)
res = check_res(hlt_client.traffic_config(mode = 'create', l2_encap = 'ethernet_ii_vlan', rate_pps = 1,
port_handle = port_handle[0], port_handle2 = port_handle[1], save_to_yaml = '/tmp/d1.yaml',
l4_protocol = 'udp',
#udp_src_port_mode = 'decrement',
#udp_src_port_count = 10, udp_src_port = 5,
))
ports_streams_dict.add_streams_from_res(res)
sys.exit(0)
#print ports_streams_dict
#print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
res = check_res(hlt_client.traffic_config(mode = 'modify', port_handle = port_handle[0], stream_id = ports_streams_dict[0][0],
mac_src = '1-2-3:4:5:6', l4_protocol = 'udp', save_to_yaml = '/tmp/d2.yaml'))
#print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
#print hlt_client._streams_history
res = check_res(hlt_client.traffic_config(mode = 'modify', port_handle = port_handle[0], stream_id = ports_streams_dict[0][0],
mac_dst = '{ 7 7 7-7:7:7}', save_to_yaml = '/tmp/d3.yaml'))
#print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
check_res(hlt_client.traffic_config(mode = 'reset', port_handle = port_handle))
res = check_res(hlt_client.traffic_config(mode = 'create', bidirectional = True, length_mode = 'fixed',
port_handle = port_handle[0], port_handle2 = port_handle[1],
transmit_mode = 'single_burst', pkts_per_burst = 100, rate_pps = 100,
mac_src = '1-2-3-4-5-6',
mac_dst = '6:5:4:4:5:6',
save_to_yaml = '/tmp/imix.yaml'))
ports_streams_dict.add_streams_from_res(res)
print('Create single_burst 100 packets rate_pps=100 on port 0')
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[0], transmit_mode = 'single_burst',
pkts_per_burst = 100, rate_pps = 100))
ports_streams_dict.add_streams_from_res(res)
# playground - creating various streams on port 1
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt2.yaml',
tcp_src_port_mode = 'decrement',
tcp_src_port_count = 10, tcp_dst_port_count = 10, tcp_dst_port_mode = 'random'))
ports_streams_dict.add_streams_from_res(res)
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt3.yaml',
l4_protocol = 'udp',
udp_src_port_mode = 'decrement',
udp_src_port_count = 10, udp_dst_port_count = 10, udp_dst_port_mode = 'random'))
ports_streams_dict.add_streams_from_res(res)
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt4.yaml',
length_mode = 'increment',
#ip_src_addr = '192.168.1.1', ip_src_mode = 'increment', ip_src_count = 5,
ip_dst_addr = '5.5.5.5', ip_dst_mode = 'random', ip_dst_count = 2))
ports_streams_dict.add_streams_from_res(res)
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt5.yaml',
length_mode = 'decrement', frame_size_min = 100, frame_size_max = 3000,
#ip_src_addr = '192.168.1.1', ip_src_mode = 'increment', ip_src_count = 5,
#ip_dst_addr = '5.5.5.5', ip_dst_mode = 'random', ip_dst_count = 2
))
ports_streams_dict.add_streams_from_res(res)
# remove the playground
check_res(hlt_client.traffic_config(mode = 'reset', port_handle = port_handle[1]))
print('Create continuous stream for port 1, rate_pps = 1')
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt1.yaml',
#length_mode = 'increment', l3_length_min = 200,
ip_src_addr = '192.168.1.1', ip_src_mode = 'increment', ip_src_count = 5,
ip_dst_addr = '5.5.5.5', ip_dst_mode = 'random', ip_dst_count = 2))
check_res(hlt_client.traffic_control(action = 'run', port_handle = port_handle))
wait_with_progress(1)
print('Sample after 1 seconds (only packets count)')
res = check_res(hlt_client.traffic_stats(mode = 'all', port_handle = port_handle))
print_brief_stats(res)
print ''
print('Port 0 has finished the burst, put continuous instead with rate 1000. No stopping of other ports.')
check_res(hlt_client.traffic_control(action = 'stop', port_handle = port_handle[0]))
check_res(hlt_client.traffic_config(mode = 'reset', port_handle = port_handle[0]))
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[0], rate_pps = 1000))
ports_streams_dict.add_streams_from_res(res)
check_res(hlt_client.traffic_control(action = 'run', port_handle = port_handle[0]))
wait_with_progress(5)
print('Sample after another 5 seconds (only packets count)')
res = check_res(hlt_client.traffic_stats(mode = 'aggregate', port_handle = port_handle))
print_brief_stats(res)
print ''
print('Stop traffic at port 1')
res = check_res(hlt_client.traffic_control(action = 'stop', port_handle = port_handle[1]))
wait_with_progress(5)
print('Sample after another %s seconds (only packets count)' % 5)
res = check_res(hlt_client.traffic_stats(mode = 'aggregate', port_handle = port_handle))
print_brief_stats(res)
print ''
print('Full HLT stats:')
pprint(res)
check_res(hlt_client.cleanup_session())
except Exception as e:
print(traceback.print_exc())
print(e)
raise
finally:
print('Done.')
|
kisel/trex-core
|
scripts/automation/regression/hltapi_playground.py
|
Python
|
apache-2.0
| 10,579
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2api.controllers.resource import ResourceController
from st2common.models.api.trace import TraceAPI
from st2common.persistence.trace import Trace
from st2common.rbac.types import PermissionType
__all__ = ["TracesController"]
class TracesController(ResourceController):
model = TraceAPI
access = Trace
supported_filters = {
"trace_tag": "trace_tag",
"execution": "action_executions.object_id",
"rule": "rules.object_id",
"trigger_instance": "trigger_instances.object_id",
}
query_options = {"sort": ["-start_timestamp", "trace_tag"]}
def get_all(
self,
exclude_attributes=None,
include_attributes=None,
sort=None,
offset=0,
limit=None,
requester_user=None,
**raw_filters,
):
# Use a custom sort order when filtering on a timestamp so we return a correct result as
# expected by the user
query_options = None
if "sort_desc" in raw_filters and raw_filters["sort_desc"] == "True":
query_options = {"sort": ["-start_timestamp", "trace_tag"]}
elif "sort_asc" in raw_filters and raw_filters["sort_asc"] == "True":
query_options = {"sort": ["+start_timestamp", "trace_tag"]}
return self._get_all(
exclude_fields=exclude_attributes,
include_fields=include_attributes,
sort=sort,
offset=offset,
limit=limit,
query_options=query_options,
raw_filters=raw_filters,
requester_user=requester_user,
)
def get_one(self, id, requester_user):
return self._get_one_by_id(
id, requester_user=requester_user, permission_type=PermissionType.TRACE_VIEW
)
traces_controller = TracesController()
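# Illustrative request (editor sketch; the exact mount point of this controller is
# configured elsewhere, so the path below is an assumption):
#   GET /v1/traces?trace_tag=my-tag&sort_desc=True
# lists traces whose trace_tag matches, newest start_timestamp first, via the sort
# override in get_all() above.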
|
nzlosh/st2
|
st2api/st2api/controllers/v1/traces.py
|
Python
|
apache-2.0
| 2,445
|
"""Common test objects."""
import copy
from datetime import datetime
import json
from unittest.mock import ANY, patch
from homeassistant.components import mqtt
from homeassistant.components.mqtt import debug_info
from homeassistant.components.mqtt.const import MQTT_DISCONNECTED
from homeassistant.components.mqtt.mixins import MQTT_ATTRIBUTES_BLOCKED
from homeassistant.const import ATTR_ASSUMED_STATE, STATE_UNAVAILABLE
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.setup import async_setup_component
from tests.common import async_fire_mqtt_message, mock_registry
DEFAULT_CONFIG_DEVICE_INFO_ID = {
"identifiers": ["helloworld"],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
"suggested_area": "default_area",
}
DEFAULT_CONFIG_DEVICE_INFO_MAC = {
"connections": [[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
"suggested_area": "default_area",
}
async def help_test_availability_when_connection_lost(hass, mqtt_mock, domain, config):
"""Test availability after MQTT disconnection."""
assert await async_setup_component(hass, domain, config)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
mqtt_mock.connected = False
async_dispatcher_send(hass, MQTT_DISCONNECTED)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
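# Illustrative call from a platform's test module (editor sketch; the config dict
# below is an assumption, real tests pass their platform's own DEFAULT_CONFIG):
#   await help_test_availability_when_connection_lost(
#       hass, mqtt_mock, "sensor",
#       {"sensor": {"platform": "mqtt", "name": "test", "state_topic": "state-topic"}})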
async def help_test_availability_without_topic(hass, mqtt_mock, domain, config):
"""Test availability without defined availability topic."""
assert "availability_topic" not in config[domain]
assert await async_setup_component(hass, domain, config)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_default_availability_payload(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability by default payload with defined topic.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability_topic"] = "availability-topic"
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if state_topic:
async_fire_mqtt_message(hass, state_topic, state_message)
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_default_availability_list_payload(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability by default payload with defined topic.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability"] = [
{"topic": "availability-topic1"},
{"topic": "availability-topic2"},
]
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic1", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic2", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic2", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if state_topic:
async_fire_mqtt_message(hass, state_topic, state_message)
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_default_availability_list_payload_all(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability by default payload with defined topic.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability_mode"] = "all"
config[domain]["availability"] = [
{"topic": "availability-topic1"},
{"topic": "availability-topic2"},
]
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic2", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic2", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic2", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_default_availability_list_payload_any(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability by default payload with defined topic.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability_mode"] = "any"
config[domain]["availability"] = [
{"topic": "availability-topic1"},
{"topic": "availability-topic2"},
]
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic2", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic2", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic1", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async def help_test_default_availability_list_single(
hass,
mqtt_mock,
caplog,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability list and availability_topic are mutually exclusive.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability"] = [
{"topic": "availability-topic1"},
]
config[domain]["availability_topic"] = "availability-topic"
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state is None
assert (
"Invalid config for [sensor.mqtt]: two or more values in the same group of exclusion 'availability'"
in caplog.text
)
async def help_test_custom_availability_payload(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test availability by custom payload with defined topic.
This is a test helper for the MqttAvailability mixin.
"""
# Add availability settings to config
config = copy.deepcopy(config)
config[domain]["availability_topic"] = "availability-topic"
config[domain]["payload_available"] = "good"
config[domain]["payload_not_available"] = "nogood"
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "good")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
if no_assumed_state:
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "availability-topic", "nogood")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
if state_topic:
async_fire_mqtt_message(hass, state_topic, state_message)
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "good")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_discovery_update_availability(
hass,
mqtt_mock,
domain,
config,
no_assumed_state=False,
state_topic=None,
state_message=None,
):
"""Test update of discovered MQTTAvailability.
This is a test helper for the MQTTAvailability mixin.
"""
# Add availability settings to config
config1 = copy.deepcopy(config)
config1[domain]["availability_topic"] = "availability-topic1"
config2 = copy.deepcopy(config)
config2[domain]["availability"] = [
{"topic": "availability-topic2"},
{"topic": "availability-topic3"},
]
config3 = copy.deepcopy(config)
config3[domain]["availability_topic"] = "availability-topic4"
data1 = json.dumps(config1[domain])
data2 = json.dumps(config2[domain])
data3 = json.dumps(config3[domain])
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic1", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
# Change availability_topic
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data2)
await hass.async_block_till_done()
# Verify we are no longer subscribing to the old topic
async_fire_mqtt_message(hass, "availability-topic1", "online")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, "availability-topic2", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, "availability-topic3", "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
# Change availability_topic
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data3)
await hass.async_block_till_done()
# Verify we are no longer subscribing to the old topic
async_fire_mqtt_message(hass, "availability-topic2", "online")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
# Verify we are no longer subscribing to the old topic
async_fire_mqtt_message(hass, "availability-topic3", "online")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, "availability-topic4", "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async def help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, domain, config
):
"""Test the setting of attribute via MQTT with JSON payload.
This is a test helper for the MqttAttributes mixin.
"""
# Add JSON attributes settings to config
config = copy.deepcopy(config)
config[domain]["json_attributes_topic"] = "attr-topic"
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "attr-topic", '{ "val": "100" }')
state = hass.states.get(f"{domain}.test")
assert state.attributes.get("val") == "100"
async def help_test_setting_blocked_attribute_via_mqtt_json_message(
hass, mqtt_mock, domain, config, extra_blocked_attributes
):
"""Test the setting of blocked attribute via MQTT with JSON payload.
This is a test helper for the MqttAttributes mixin.
"""
extra_blocked_attributes = extra_blocked_attributes or []
# Add JSON attributes settings to config
config = copy.deepcopy(config)
config[domain]["json_attributes_topic"] = "attr-topic"
data = json.dumps(config[domain])
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
val = "abc123"
for attr in MQTT_ATTRIBUTES_BLOCKED:
async_fire_mqtt_message(hass, "attr-topic", json.dumps({attr: val}))
state = hass.states.get(f"{domain}.test")
assert state.attributes.get(attr) != val
for attr in extra_blocked_attributes:
async_fire_mqtt_message(hass, "attr-topic", json.dumps({attr: val}))
state = hass.states.get(f"{domain}.test")
assert state.attributes.get(attr) != val
async def help_test_setting_attribute_with_template(hass, mqtt_mock, domain, config):
"""Test the setting of attribute via MQTT with JSON payload.
This is a test helper for the MqttAttributes mixin.
"""
# Add JSON attributes settings to config
config = copy.deepcopy(config)
config[domain]["json_attributes_topic"] = "attr-topic"
config[domain]["json_attributes_template"] = "{{ value_json['Timer1'] | tojson }}"
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
async_fire_mqtt_message(
hass, "attr-topic", json.dumps({"Timer1": {"Arm": 0, "Time": "22:18"}})
)
state = hass.states.get(f"{domain}.test")
assert state.attributes.get("Arm") == 0
assert state.attributes.get("Time") == "22:18"
async def help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, domain, config
):
"""Test attributes get extracted from a JSON result.
This is a test helper for the MqttAttributes mixin.
"""
# Add JSON attributes settings to config
config = copy.deepcopy(config)
config[domain]["json_attributes_topic"] = "attr-topic"
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "attr-topic", '[ "list", "of", "things"]')
state = hass.states.get(f"{domain}.test")
assert state.attributes.get("val") is None
assert "JSON result was not a dictionary" in caplog.text
async def help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, domain, config
):
"""Test JSON validation of attributes.
This is a test helper for the MqttAttributes mixin.
"""
# Add JSON attributes settings to config
config = copy.deepcopy(config)
config[domain]["json_attributes_topic"] = "attr-topic"
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "attr-topic", "This is not JSON")
state = hass.states.get(f"{domain}.test")
assert state.attributes.get("val") is None
assert "Erroneous JSON: This is not JSON" in caplog.text
async def help_test_discovery_update_attr(hass, mqtt_mock, caplog, domain, config):
"""Test update of discovered MQTTAttributes.
This is a test helper for the MqttAttributes mixin.
"""
# Add JSON attributes settings to config
config1 = copy.deepcopy(config)
config1[domain]["json_attributes_topic"] = "attr-topic1"
config2 = copy.deepcopy(config)
config2[domain]["json_attributes_topic"] = "attr-topic2"
data1 = json.dumps(config1[domain])
data2 = json.dumps(config2[domain])
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data1)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "100" }')
state = hass.states.get(f"{domain}.test")
assert state.attributes.get("val") == "100"
# Change json_attributes_topic
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data2)
await hass.async_block_till_done()
# Verify we are no longer subscribing to the old topic
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "50" }')
state = hass.states.get(f"{domain}.test")
assert state.attributes.get("val") == "100"
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, "attr-topic2", '{ "val": "75" }')
state = hass.states.get(f"{domain}.test")
assert state.attributes.get("val") == "75"
async def help_test_unique_id(hass, mqtt_mock, domain, config):
"""Test unique id option only creates one entity per unique_id."""
assert await async_setup_component(hass, domain, config)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(domain)) == 1
async def help_test_discovery_removal(hass, mqtt_mock, caplog, domain, data):
"""Test removal of discovered component.
This is a test helper for the MqttDiscoveryUpdate mixin.
"""
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state is not None
assert state.name == "test"
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", "")
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state is None
async def help_test_discovery_update(
hass,
mqtt_mock,
caplog,
domain,
discovery_data1,
discovery_data2,
state_data1=None,
state_data2=None,
):
"""Test update of discovered component.
This is a test helper for the MqttDiscoveryUpdate mixin.
"""
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", discovery_data1)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.beer")
assert state is not None
assert state.name == "Beer"
if state_data1:
for (mqtt_messages, expected_state, attributes) in state_data1:
for (topic, data) in mqtt_messages:
async_fire_mqtt_message(hass, topic, data)
state = hass.states.get(f"{domain}.beer")
if expected_state:
assert state.state == expected_state
if attributes:
for (attr, value) in attributes:
assert state.attributes.get(attr) == value
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", discovery_data2)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.beer")
assert state is not None
assert state.name == "Milk"
if state_data2:
for (mqtt_messages, expected_state, attributes) in state_data2:
for (topic, data) in mqtt_messages:
async_fire_mqtt_message(hass, topic, data)
state = hass.states.get(f"{domain}.beer")
if expected_state:
assert state.state == expected_state
if attributes:
for (attr, value) in attributes:
assert state.attributes.get(attr) == value
state = hass.states.get(f"{domain}.milk")
assert state is None
async def help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, domain, data1, discovery_update
):
"""Test update of discovered component without changes.
This is a test helper for the MqttDiscoveryUpdate mixin.
"""
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data1)
await hass.async_block_till_done()
assert not discovery_update.called
async def help_test_discovery_broken(hass, mqtt_mock, caplog, domain, data1, data2):
"""Test handling of bad discovery message."""
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.beer")
assert state is None
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data2)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.milk")
assert state is not None
assert state.name == "Milk"
state = hass.states.get(f"{domain}.beer")
assert state is None
async def help_test_entity_device_info_with_identifier(hass, mqtt_mock, domain, config):
"""Test device registry integration.
This is a test helper for the MqttDiscoveryUpdate mixin.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
registry = dr.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
assert device.identifiers == {("mqtt", "helloworld")}
assert device.manufacturer == "Whatever"
assert device.name == "Beer"
assert device.model == "Glass"
assert device.sw_version == "0.1-beta"
assert device.suggested_area == "default_area"
async def help_test_entity_device_info_with_connection(hass, mqtt_mock, domain, config):
"""Test device registry integration.
This is a test helper for the MqttDiscoveryUpdate mixin.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_MAC)
config["unique_id"] = "veryunique"
registry = dr.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12")}
)
assert device is not None
assert device.connections == {(dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12")}
assert device.manufacturer == "Whatever"
assert device.name == "Beer"
assert device.model == "Glass"
assert device.sw_version == "0.1-beta"
assert device.suggested_area == "default_area"
async def help_test_entity_device_info_remove(hass, mqtt_mock, domain, config):
"""Test device registry remove."""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
dev_registry = dr.async_get(hass)
ent_registry = er.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = dev_registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
assert ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique")
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", "")
await hass.async_block_till_done()
device = dev_registry.async_get_device({("mqtt", "helloworld")})
assert device is None
assert not ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique")
async def help_test_entity_device_info_update(hass, mqtt_mock, domain, config):
"""Test device registry update.
This is a test helper for the MqttDiscoveryUpdate mixin.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
registry = dr.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
assert device.name == "Beer"
config["device"]["name"] = "Milk"
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
assert device.name == "Milk"
async def help_test_entity_id_update_subscriptions(
hass, mqtt_mock, domain, config, topics=None
):
"""Test MQTT subscriptions are managed when entity_id is updated."""
# Add unique_id to config
config = copy.deepcopy(config)
config[domain]["unique_id"] = "TOTALLY_UNIQUE"
if topics is None:
# Add default topics to config
config[domain]["availability_topic"] = "avty-topic"
config[domain]["state_topic"] = "test-topic"
topics = ["avty-topic", "test-topic"]
assert len(topics) > 0
registry = mock_registry(hass, {})
assert await async_setup_component(
hass,
domain,
config,
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state is not None
assert mqtt_mock.async_subscribe.call_count == len(topics)
for topic in topics:
mqtt_mock.async_subscribe.assert_any_call(topic, ANY, ANY, ANY)
mqtt_mock.async_subscribe.reset_mock()
registry.async_update_entity(f"{domain}.test", new_entity_id=f"{domain}.milk")
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.test")
assert state is None
state = hass.states.get(f"{domain}.milk")
assert state is not None
for topic in topics:
mqtt_mock.async_subscribe.assert_any_call(topic, ANY, ANY, ANY)
async def help_test_entity_id_update_discovery_update(
hass, mqtt_mock, domain, config, topic=None
):
"""Test MQTT discovery update after entity_id is updated."""
# Add unique_id to config
config = copy.deepcopy(config)
config[domain]["unique_id"] = "TOTALLY_UNIQUE"
if topic is None:
# Add default topic to config
config[domain]["availability_topic"] = "avty-topic"
topic = "avty-topic"
ent_registry = mock_registry(hass, {})
data = json.dumps(config[domain])
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, topic, "online")
state = hass.states.get(f"{domain}.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, topic, "offline")
state = hass.states.get(f"{domain}.test")
assert state.state == STATE_UNAVAILABLE
ent_registry.async_update_entity(f"{domain}.test", new_entity_id=f"{domain}.milk")
await hass.async_block_till_done()
config[domain]["availability_topic"] = f"{topic}_2"
data = json.dumps(config[domain])
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(domain)) == 1
async_fire_mqtt_message(hass, f"{topic}_2", "online")
state = hass.states.get(f"{domain}.milk")
assert state.state != STATE_UNAVAILABLE
async def help_test_entity_debug_info(hass, mqtt_mock, domain, config):
"""Test debug_info.
This is a test helper for MQTT debug_info.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
registry = dr.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"]) == 1
assert (
debug_info_data["entities"][0]["discovery_data"]["topic"]
== f"homeassistant/{domain}/bla/config"
)
assert debug_info_data["entities"][0]["discovery_data"]["payload"] == config
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {"topic": "test-topic", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
assert len(debug_info_data["triggers"]) == 0
async def help_test_entity_debug_info_max_messages(hass, mqtt_mock, domain, config):
"""Test debug_info message overflow.
This is a test helper for MQTT debug_info.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
registry = dr.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {"topic": "test-topic", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
for i in range(0, debug_info.STORED_MESSAGES + 1):
async_fire_mqtt_message(hass, "test-topic", f"{i}")
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert (
len(debug_info_data["entities"][0]["subscriptions"][0]["messages"])
== debug_info.STORED_MESSAGES
)
messages = [
{
"payload": f"{i}",
"qos": 0,
"retain": False,
"time": start_dt,
"topic": "test-topic",
}
for i in range(1, debug_info.STORED_MESSAGES + 1)
]
assert {"topic": "test-topic", "messages": messages} in debug_info_data["entities"][
0
]["subscriptions"]
async def help_test_entity_debug_info_message(
hass, mqtt_mock, domain, config, topic=None, payload=None
):
"""Test debug_info message overflow.
This is a test helper for MQTT debug_info.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
if topic is None:
# Add default topic to config
config["state_topic"] = "state-topic"
topic = "state-topic"
if payload is None:
payload = "ON"
registry = dr.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {"topic": topic, "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
async_fire_mqtt_message(hass, topic, payload)
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {
"topic": topic,
"messages": [
{
"payload": payload,
"qos": 0,
"retain": False,
"time": start_dt,
"topic": topic,
}
],
} in debug_info_data["entities"][0]["subscriptions"]
async def help_test_entity_debug_info_remove(hass, mqtt_mock, domain, config):
"""Test debug_info.
This is a test helper for MQTT debug_info.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
registry = dr.async_get(hass)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"]) == 1
assert (
debug_info_data["entities"][0]["discovery_data"]["topic"]
== f"homeassistant/{domain}/bla/config"
)
assert debug_info_data["entities"][0]["discovery_data"]["payload"] == config
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {"topic": "test-topic", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
assert len(debug_info_data["triggers"]) == 0
assert debug_info_data["entities"][0]["entity_id"] == f"{domain}.test"
entity_id = debug_info_data["entities"][0]["entity_id"]
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", "")
await hass.async_block_till_done()
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"]) == 0
assert len(debug_info_data["triggers"]) == 0
assert entity_id not in hass.data[debug_info.DATA_MQTT_DEBUG_INFO]["entities"]
async def help_test_entity_debug_info_update_entity_id(hass, mqtt_mock, domain, config):
"""Test debug_info.
This is a test helper for MQTT debug_info.
"""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["unique_id"] = "veryunique"
dev_registry = dr.async_get(hass)
ent_registry = mock_registry(hass, {})
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla/config", data)
await hass.async_block_till_done()
device = dev_registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"]) == 1
assert (
debug_info_data["entities"][0]["discovery_data"]["topic"]
== f"homeassistant/{domain}/bla/config"
)
assert debug_info_data["entities"][0]["discovery_data"]["payload"] == config
assert debug_info_data["entities"][0]["entity_id"] == f"{domain}.test"
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {"topic": "test-topic", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
assert len(debug_info_data["triggers"]) == 0
ent_registry.async_update_entity(f"{domain}.test", new_entity_id=f"{domain}.milk")
await hass.async_block_till_done()
await hass.async_block_till_done()
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"]) == 1
assert (
debug_info_data["entities"][0]["discovery_data"]["topic"]
== f"homeassistant/{domain}/bla/config"
)
assert debug_info_data["entities"][0]["discovery_data"]["payload"] == config
assert debug_info_data["entities"][0]["entity_id"] == f"{domain}.milk"
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {"topic": "test-topic", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
assert len(debug_info_data["triggers"]) == 0
assert (
f"{domain}.test" not in hass.data[debug_info.DATA_MQTT_DEBUG_INFO]["entities"]
)
async def help_test_entity_disabled_by_default(hass, mqtt_mock, domain, config):
"""Test device registry remove."""
# Add device settings to config
config = copy.deepcopy(config[domain])
config["device"] = copy.deepcopy(DEFAULT_CONFIG_DEVICE_INFO_ID)
config["enabled_by_default"] = False
config["unique_id"] = "veryunique1"
dev_registry = dr.async_get(hass)
ent_registry = er.async_get(hass)
# Discover a disabled entity
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla1/config", data)
await hass.async_block_till_done()
entity_id = ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique1")
assert not hass.states.get(entity_id)
assert dev_registry.async_get_device({("mqtt", "helloworld")})
# Discover an enabled entity, tied to the same device
config["enabled_by_default"] = True
config["unique_id"] = "veryunique2"
data = json.dumps(config)
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla2/config", data)
await hass.async_block_till_done()
entity_id = ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique2")
assert hass.states.get(entity_id)
# Remove the enabled entity, both entities and the device should be removed
async_fire_mqtt_message(hass, f"homeassistant/{domain}/bla2/config", "")
await hass.async_block_till_done()
assert not ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique1")
assert not ent_registry.async_get_entity_id(domain, mqtt.DOMAIN, "veryunique2")
assert not dev_registry.async_get_device({("mqtt", "helloworld")})
| sander76/home-assistant | tests/components/mqtt/test_common.py | Python | apache-2.0 | 41,174 |
#!/usr/bin/env python3
"""Script to generate unannotated baseline stubs using stubgen.
Basic usage:
$ python3 scripts/create_baseline_stubs.py <project on PyPI>
Run with -h for more help.
"""
import argparse
import os
import re
import shutil
import subprocess
import sys
from typing import Optional, Tuple
PYRIGHT_CONFIG = "pyrightconfig.stricter.json"
def search_pip_freeze_output(project: str, output: str) -> Optional[Tuple[str, str]]:
# Look for lines such as "typed-ast==1.4.2". '-' matches '_' and
# '_' matches '-' in project name, so that "typed_ast" matches
# "typed-ast", and vice versa.
regex = "^(" + re.sub(r"[-_]", "[-_]", project) + ")==(.*)"
m = re.search(regex, output, flags=re.IGNORECASE | re.MULTILINE)
if not m:
return None
return m.group(1), m.group(2)
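# Illustrative example (not part of the original script): '-' and '_' are
# treated as interchangeable, so both spellings find the same pinned package:
#
#   search_pip_freeze_output("typed_ast", "typed-ast==1.4.2\n")
#   # -> ("typed-ast", "1.4.2")
#   search_pip_freeze_output("typed-ast", "typed-ast==1.4.2\n")
#   # -> ("typed-ast", "1.4.2")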
def get_installed_package_info(project: str) -> Optional[Tuple[str, str]]:
"""Find package information from pip freeze output.
    Match project name somewhat fuzzily (case-insensitive; '-' matches '_', and
vice versa).
Return (normalized project name, installed version) if successful.
"""
r = subprocess.run(["pip", "freeze"], capture_output=True, text=True, check=True)
return search_pip_freeze_output(project, r.stdout)
def run_stubgen(package: str) -> None:
print(f"Running stubgen: stubgen -p {package}")
subprocess.run(["python", "-m", "mypy.stubgen", "-p", package], check=True)
def copy_stubs(src_base_dir: str, package: str, stub_dir: str) -> None:
"""Copy generated stubs to the target directory under stub_dir/."""
print(f"Copying stubs to {stub_dir}")
if not os.path.isdir(stub_dir):
os.mkdir(stub_dir)
src_dir = os.path.join(src_base_dir, package)
if os.path.isdir(src_dir):
shutil.copytree(src_dir, os.path.join(stub_dir, package))
else:
src_file = os.path.join("out", package + ".pyi")
if not os.path.isfile(src_file):
sys.exit("Error: Cannot find generated stubs")
shutil.copy(src_file, stub_dir)
def run_black(stub_dir: str) -> None:
print(f"Running black: black {stub_dir}")
subprocess.run(["black", stub_dir])
def run_isort(stub_dir: str) -> None:
print(f"Running isort: isort {stub_dir}")
subprocess.run(["python3", "-m", "isort", stub_dir])
def create_metadata(stub_dir: str, version: str) -> None:
"""Create a METADATA.toml file."""
m = re.match(r"[0-9]+.[0-9]+", version)
if m is None:
sys.exit(f"Error: Cannot parse version number: {version}")
fnam = os.path.join(stub_dir, "METADATA.toml")
version = m.group(0)
assert not os.path.exists(fnam)
print(f"Writing {fnam}")
with open(fnam, "w") as f:
f.write(f'version = "{version}.*"\n')
def add_pyright_exclusion(stub_dir: str) -> None:
"""Exclude stub_dir from strict pyright checks."""
with open(PYRIGHT_CONFIG) as f:
lines = f.readlines()
i = 0
while i < len(lines) and not lines[i].strip().startswith('"exclude": ['):
i += 1
assert i < len(lines), f"Error parsing {PYRIGHT_CONFIG}"
while not lines[i].strip().startswith("]"):
i += 1
line_to_add = f' "{stub_dir}",'
initial = i - 1
while lines[i].lower() > line_to_add.lower():
i -= 1
if lines[i + 1].strip().rstrip(",") == line_to_add.strip().rstrip(","):
print(f"{PYRIGHT_CONFIG} already up-to-date")
return
if i == initial:
# Special case: when adding to the end of the list, commas need tweaking
line_to_add = line_to_add.rstrip(",")
lines[i] = lines[i].rstrip() + ",\n"
lines.insert(i + 1, line_to_add + "\n")
print(f"Updating {PYRIGHT_CONFIG}")
with open(PYRIGHT_CONFIG, "w") as f:
f.writelines(lines)
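# Sketch of the effect (hypothetical stub_dir "stubs/foo"): the "exclude"
# list of pyrightconfig.stricter.json gains the entry
#     "stubs/foo",
# inserted at its alphabetical position, unless an equivalent entry already
# exists (in which case the file is left untouched).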
def main() -> None:
parser = argparse.ArgumentParser(
description="""Generate baseline stubs automatically for an installed pip package
using stubgen. Also run black and isort. If the name of
the project is different from the runtime Python package name, you must
also use --package (example: --package yaml PyYAML)."""
)
parser.add_argument("project", help="name of PyPI project for which to generate stubs under stubs/")
parser.add_argument("--package", help="generate stubs for this Python package (defaults to project)")
args = parser.parse_args()
project = args.project
package = args.package
if not re.match(r"[a-zA-Z0-9-_.]+$", project):
sys.exit(f"Invalid character in project name: {project!r}")
if not package:
package = project # TODO: infer from installed files
if not os.path.isdir("stubs") or not os.path.isdir("stdlib"):
sys.exit("Error: Current working directory must be the root of typeshed repository")
# Get normalized project name and version of installed package.
info = get_installed_package_info(project)
if info is None:
print(f'Error: "{project}" is not installed', file=sys.stderr)
print("", file=sys.stderr)
print(f'Suggestion: Run "python3 -m pip install {project}" and try again', file=sys.stderr)
sys.exit(1)
project, version = info
stub_dir = os.path.join("stubs", project)
if os.path.exists(stub_dir):
sys.exit(f"Error: {stub_dir} already exists (delete it first)")
run_stubgen(package)
# Stubs were generated under out/. Copy them to stubs/.
copy_stubs("out", package, stub_dir)
run_isort(stub_dir)
run_black(stub_dir)
create_metadata(stub_dir, version)
# Since the generated stubs won't have many type annotations, we
# have to exclude them from strict pyright checks.
add_pyright_exclusion(stub_dir)
print("\nDone!\n\nSuggested next steps:")
print(f" 1. Manually review the generated stubs in {stub_dir}")
print(f' 2. Run "MYPYPATH={stub_dir} python3 -m mypy.stubtest {package}" to check the stubs against runtime')
print(f' 3. Run "mypy {stub_dir}" to check for errors')
print(f' 4. Run "black {stub_dir}" and "isort {stub_dir}" (if you\'ve made code changes)')
print(f' 5. Run "flake8 {stub_dir}" to check for e.g. unused imports')
print(" 6. Commit the changes on a new branch and create a typeshed PR")
if __name__ == "__main__":
main()
| google/intellij-community | python/helpers/typeshed/scripts/create_baseline_stubs.py | Python | apache-2.0 | 6,346 |
# -*- coding: utf-8 -*-
"""Provides concept object."""
from __future__ import absolute_import
from .. import t1types
from ..entity import Entity
class Concept(Entity):
"""Concept entity."""
collection = 'concepts'
resource = 'concept'
_relations = {
'advertiser',
}
_pull = {
'advertiser_id': int,
'created_on': t1types.strpt,
'id': int,
'name': None,
'status': t1types.int_to_bool,
'updated_on': t1types.strpt,
'version': int,
}
_push = _pull.copy()
_push.update({
'status': int,
})
def __init__(self, session, properties=None, **kwargs):
super(Concept, self).__init__(session, properties, **kwargs)
| MediaMath/t1-python | terminalone/models/concept.py | Python | apache-2.0 | 729 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a string.
def read_data(filename):
  """Extract the first file enclosed in a zip file as a list of words."""
  # Use a context manager so the archive is closed even on early return.
  with zipfile.ZipFile(filename) as f:
    return f.read(f.namelist()[0]).split()
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
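# Worked example (hypothetical, tiny input): build_dataset(["the", "quick", "the"])
# returns data == [1, 2, 1], count == [['UNK', 0], ('the', 2), ('quick', 1)],
# dictionary == {'UNK': 0, 'the': 1, 'quick': 2} and the matching
# reverse_dictionary, since no word falls outside the vocabulary here.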
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [ skip_window ]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], '->', labels[i, 0])
print(reverse_dictionary[batch[i]], '->', reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
num_sampled, vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
tf.initialize_all_variables().run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}
# We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run()).
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
  plt.figure(figsize=(18, 18))  # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i,:]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn and matplotlib to visualize embeddings.")
| martinbede/second-sight | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | Python | apache-2.0 | 8,770 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilty functions for control flow.
This file is necessary to avoid cyclic dependencies between ops.py and
control_flow_ops.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import traceback
from tensorflow.python.platform import tf_logging as logging
ENABLE_CONTROL_FLOW_V2 = (os.getenv("TF_ENABLE_CONTROL_FLOW_V2", "0") != "0" or
os.getenv("TF_ENABLE_COND_V2", "0") != "0" or
os.getenv("TF_ENABLE_WHILE_V2", "0") != "0" or
os.getenv("TF_ENABLE_TENSOR_ARRAY_V2", "0") != "0")
def EnableControlFlowV2(graph):
"""Returns whether control flow v2 should be used in `graph`."""
# Enable new control flow in FuncGraphs (but not legacy _FuncGraphs).
# TODO(skyewm): do something better than hasattr without messing up imports.
return ENABLE_CONTROL_FLOW_V2 or (
graph.building_function and not hasattr(graph, "_captured"))
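# Example (environment assumption, not part of this module): exporting
#   TF_ENABLE_CONTROL_FLOW_V2=1
# before this module is first imported makes EnableControlFlowV2() return True
# for every graph, since ENABLE_CONTROL_FLOW_V2 is evaluated at import time.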
def IsInXLAContext(op):
try:
xla_compile = op.get_attr("_XlaCompile")
if xla_compile: return True
except ValueError:
pass
ctxt = op._get_control_flow_context() # pylint: disable=protected-access
return GetContainingXLAContext(ctxt) is not None
def InXlaContext(graph):
ctxt = graph._get_control_flow_context() # pylint: disable=protected-access
return GetContainingXLAContext(ctxt) is not None
def GraphOrParentsInXlaContext(graph):
while True:
if InXlaContext(graph): return True
try:
graph = graph.outer_graph
except AttributeError:
return False
def IsInWhileLoop(op):
ctxt = op._get_control_flow_context() # pylint: disable=protected-access
return GetContainingWhileContext(ctxt) is not None
def IsInCond(op):
ctxt = op._get_control_flow_context() # pylint: disable=protected-access
return GetContainingCondContext(ctxt) is not None
def IsSwitch(op):
"""Return true if `op` is a Switch."""
return op.type == "Switch" or op.type == "RefSwitch"
def IsMerge(op):
"""Return true if `op` is a Merge."""
return op.type == "Merge" or op.type == "RefMerge"
def IsLoopEnter(op):
"""Returns true if `op` is an Enter."""
return op.type == "Enter" or op.type == "RefEnter"
def IsLoopExit(op):
"""Return true if `op` is an Exit."""
return op.type == "Exit" or op.type == "RefExit"
def IsCondSwitch(op):
"""Return true if `op` is the Switch for a conditional."""
if not IsSwitch(op):
return False
if not op.outputs:
return False
# Switch nodes are not part of the cond control flow context that they
# represent, so consider the consumers of its outputs to determine if it is
# cond switch or not. A switch is a cond switch iff all its consumers are in
# cond contexts.
is_cond_switch = True
for o in op.outputs:
for c in o.consumers():
ctxt = c._get_control_flow_context() # pylint: disable=protected-access
if IsLoopEnter(c):
ctxt = ctxt.outer_context
is_cond_switch = is_cond_switch and (ctxt is not None and
ctxt.IsCondContext())
return is_cond_switch
def IsCondMerge(op):
"""Return true if `op` is the Merge for a conditional."""
if not IsMerge(op):
return False
if not op.inputs:
return False
# Merge nodes are not part of the cond control flow context that they
  # represent, so consider the inputs to the merge to determine if it is
# cond merge or not: A merge is a cond merge iff all its inputs are in
# cond contexts.
is_cond_merge = True
for i in op.inputs:
ctxt = GetOutputContext(i.op)
is_cond_merge = is_cond_merge and ctxt is not None and ctxt.IsCondContext()
return is_cond_merge
def IsLoopSwitch(op):
"""Return true if `op` is the Switch for a while loop."""
if IsSwitch(op):
ctxt = op._get_control_flow_context() # pylint: disable=protected-access
return ctxt is not None and ctxt.IsWhileContext() and not IsCondSwitch(op)
return False
def IsLoopMerge(op):
"""Return true if `op` is the Merge for a while loop."""
if IsMerge(op):
ctxt = op._get_control_flow_context() # pylint: disable=protected-access
return ctxt is not None and ctxt.IsWhileContext() and not IsCondMerge(op)
return False
def IsLoopConstantEnter(op):
"""Return true iff op is a loop invariant."""
return IsLoopEnter(op) and op.get_attr("is_constant")
def GetLoopConstantEnter(value):
"""Return the enter op if we can infer `value` to be a loop invariant."""
id_ops = {"Switch", "RefSwitch", "Identity", "RefIdentity"}
op = value.op
while op.type in id_ops:
op = op.inputs[0].op
return op if IsLoopConstantEnter(op) else None
def GetOutputContext(op):
"""Return the control flow context for the output of an op."""
ctxt = op._get_control_flow_context() # pylint: disable=protected-access
# Exit nodes usually have a control flow context, except in the case where the
# exit node was imported via import_graph_def (in which case no nodes have
# control flow contexts).
if ctxt is not None and IsLoopExit(op):
ctxt = ctxt.outer_context
return ctxt
def GetContainingWhileContext(ctxt, stop_ctxt=None):
"""Returns the first ancestor WhileContext of `ctxt`.
Returns `ctxt` if `ctxt` is a WhileContext, or None if `ctxt` is not in a
while loop.
Args:
ctxt: ControlFlowContext
stop_ctxt: ControlFlowContext, optional. If provided, the search will end
if it sees stop_ctxt.
Returns:
`ctxt` if `ctxt` is a WhileContext, the most nested WhileContext containing
`ctxt`, or None if `ctxt` is not in a while loop. If `stop_ctxt` is not
`None`, this returns `ctxt` if it matches `stop_ctxt` in its traversal.
"""
while ctxt:
if ctxt.IsWhileContext() or ctxt == stop_ctxt: return ctxt
ctxt = ctxt.outer_context
return None
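# Illustrative example (not from the original file): for a context chain
#   outer WhileContext -> CondContext -> inner WhileContext,
# GetContainingWhileContext(cond_ctxt) returns the outer WhileContext, while
# GetContainingWhileContext(inner_while_ctxt) returns the inner WhileContext
# itself.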
def GetContainingXLAContext(ctxt):
"""Returns the first ancestor XLAContext of `ctxt`.
Returns `ctxt` if `ctxt` is a XLAContext, or None if `ctxt` is not in a
while loop.
Args:
ctxt: ControlFlowContext
Returns:
`ctxt` if `ctxt` is a XLAContext, the most nested XLAContext containing
`ctxt`, or None if `ctxt` is not in a while loop.
"""
while ctxt:
if ctxt.IsXLAContext(): return ctxt
ctxt = ctxt.outer_context
return None
def GetContainingCondContext(ctxt):
"""Returns the first ancestor CondContext of `ctxt`.
Returns `ctxt` if `ctxt` is a CondContext, or None if `ctxt` is not in a cond.
Args:
ctxt: ControlFlowContext
Returns:
`ctxt` if `ctxt` is a CondContext, the most nested CondContext containing
`ctxt`, or None if `ctxt` is not in a cond.
"""
while ctxt:
if ctxt.IsCondContext(): return ctxt
ctxt = ctxt.outer_context
return None
def IsContainingContext(ctxt, maybe_containing_ctxt):
"""Returns true if `maybe_containing_ctxt` is or contains `ctxt`."""
while ctxt is not maybe_containing_ctxt:
if ctxt is None: return False
ctxt = ctxt.outer_context
return True
def OpInContext(op, ctxt):
return IsContainingContext(op._get_control_flow_context(), ctxt) # pylint: disable=protected-access
def TensorInContext(tensor, ctxt):
return OpInContext(tensor.op, ctxt)
def CheckInputFromValidContext(op, input_op):
"""Returns whether `input_op` can be used from `op`s context.
Conceptually, only inputs from op's while context or any ancestor while
context (including outside of any context) are valid. In practice, there are
many other edge cases as well.
Args:
op: Operation
input_op: Operation
Raises:
ValueError: if input_op is from an invalid context.
"""
op_ctxt = op._get_control_flow_context() # pylint: disable=protected-access
input_ctxt = GetOutputContext(input_op)
valid = False
if not input_ctxt:
# input_op isn't in a control flow context.
valid = True
elif op_ctxt is input_ctxt:
# input_op is in the same context as op.
valid = True
else:
while_ctxt = GetContainingWhileContext(op_ctxt)
input_while_ctxt = GetContainingWhileContext(input_ctxt)
if while_ctxt is None:
if input_while_ctxt is None:
# Neither op nor input_op is in a while loop, but one or both are in
# conds. We allow this, although execution will fail if the branch
# corresponding to input_op's cond context isn't taken.
valid = True
# Invalid if op isn't in a while loop and input_op is. Unless...
if IsLoopEnter(op):
# WhileContext._BuildLoop clears context for Enter nodes.
valid = True
if IsSwitch(op):
# CondContext.AddValue clears context for Switch nodes.
valid = True
elif IsContainingContext(while_ctxt, input_while_ctxt):
# input_op is in a while loop which contains op's while loop (or not in a
# while loop at all).
valid = True
elif (while_ctxt.grad_state and
IsContainingContext(while_ctxt.grad_state.forward_context,
input_while_ctxt)):
# op is in a gradient context and input_op is in the associated forward
      # pass context or an ancestor thereof. This case is needed to build while
# loop gradients.
# NOTE(skyewm): we theoretically also need this case for custom gradient
# functions that close over tensors from ancestor contexts, but I haven't
# verified this.
valid = True
elif (while_ctxt.grad_state and
while_ctxt.grad_state.forward_context is
input_while_ctxt._outer_context): # pylint: disable=protected-access
# op is in a gradient context and input_op is in a child of the associated
# forward pass context. This case is needed for the gradients of while
# loops with conds.
valid = True
elif (input_while_ctxt.grad_state and
input_while_ctxt.grad_state.forward_context is while_ctxt):
# input_op is in the gradient context of op's context. This case is needed
# when the gradient of a while loop gradient is requested (this will
# eventually fail unless there is a stop_gradient() or similar).
valid = True
elif (input_while_ctxt.grad_state and
input_ctxt.grad_state.forward_context.grad_state and
input_ctxt.grad_state.forward_context.grad_state.forward_context is
while_ctxt):
# input_op is in the grad grad context of op's context. This case is
# needed when the gradient of a while loop gradient is requested (this
# will eventually fail unless there is a stop_gradient() or similar).
valid = True
if not valid:
if while_ctxt:
error_msg = (
"Cannot use '%s' as input to '%s' because they are in different while"
" loops." % (op.name, input_op.name))
else:
error_msg = (
"Cannot use '%s' as input to '%s' because '%s' is in a while loop."
% (input_op.name, op.name, input_op.name))
# Log the error message plus the relevant stack traces. The stacks may be
# useful for debugging this error, but we don't want to raise an
# unreadable exception.
log_msg = error_msg
log_msg += "\n\n%s while context: %s" % (op.name, while_ctxt)
log_msg += "\n%s while context: %s" % (input_op.name, input_while_ctxt)
log_msg += "\n\nTraceback for %s:\n%s\nTraceback for %s:\n%s\n" % (
op.name, "".join(traceback.format_list(op.traceback)),
input_op.name, "".join(traceback.format_list(input_op.traceback)))
logging.info(log_msg)
raise ValueError(error_msg + " See info log for more details.")
| hfp/tensorflow-xsmm | tensorflow/python/ops/control_flow_util.py | Python | apache-2.0 | 12,264 |
#
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import threading
import uuid
import dcm.agent.utils as agent_utils
from dcm.agent.events.globals import global_space as dcm_events
_g_logger = logging.getLogger(__name__)
_g_message_uuid = str(uuid.uuid4()).split("-")[0]
_g_message_id_count = 0
_g_guid_lock = threading.RLock()
def new_message_id():
# note: using uuid here caused deadlock in tests
global _g_message_id_count
global _g_message_uuid
_g_guid_lock.acquire()
try:
_g_message_id_count = _g_message_id_count + 1
finally:
_g_guid_lock.release()
return _g_message_uuid + str(_g_message_id_count)
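# Illustrative only: successive calls return the shared per-process prefix
# followed by an increasing counter, e.g. "3f2a1b9c1", "3f2a1b9c2", ...
# (the exact prefix is the first group of a random UUID, so it differs per
# process).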
class MessageTimer(object):
def __init__(self, timeout, callback, message_doc):
self._send_doc = message_doc
self._timeout = timeout
self._cb = callback
self._timer = None
self._lock = threading.RLock()
self.message_id = message_doc['message_id']
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
@agent_utils.class_method_sync
def send(self, conn):
_g_logger.info("Resending reply to %s" % self._send_doc["request_id"])
self._send_doc['entity'] = "timer"
conn.send(self._send_doc)
self._timer = dcm_events.register_callback(
self._cb, args=[self], delay=self._timeout)
@agent_utils.class_method_sync
def cancel(self):
if self._timer is None:
return
dcm_events.cancel_callback(self._timer)
self._timer = None
| JPWKU/unix-agent | src/dcm/agent/messaging/utils.py | Python | apache-2.0 | 2,118 |
"""Test the openml loader.
"""
import gzip
import warnings
import json
import os
import re
from importlib import resources
from io import BytesIO
import numpy as np
import scipy.sparse
import sklearn
import pytest
from sklearn import config_context
from sklearn.datasets import fetch_openml as fetch_openml_orig
from sklearn.datasets._openml import (
_open_openml_url,
_arff,
_DATA_FILE,
_OPENML_PREFIX,
_get_data_description_by_id,
_get_local_path,
_retry_with_clean_cache,
)
from sklearn.datasets._arff_parser import (
_convert_arff_data,
_convert_arff_data_dataframe,
_feature_to_dtype,
)
from sklearn.utils import is_scalar_nan
from sklearn.utils._testing import assert_allclose, assert_array_equal
from urllib.error import HTTPError
from sklearn.datasets.tests.test_common import check_return_X_y
from sklearn.externals._arff import ArffContainerType
from functools import partial
from sklearn.utils._testing import fails_if_pypy
OPENML_TEST_DATA_MODULE = "sklearn.datasets.tests.data.openml"
# if True, urlopen will be monkey patched to only use local files
test_offline = True
# Do not use a cache for `fetch_openml` to avoid concurrent writing
# issues with `pytest-xdist`.
# Furthermore sklearn/datasets/tests/data/openml/ is not always consistent
# with the version on openml.org. If one were to load the dataset outside of
# the tests, it may result in data that does not represent openml.org.
fetch_openml = partial(fetch_openml_orig, data_home=None)
def _test_features_list(data_id):
# XXX Test is intended to verify/ensure correct decoding behavior
# Not usable with sparse data or datasets that have columns marked as
# {row_identifier, ignore}
def decode_column(data_bunch, col_idx):
col_name = data_bunch.feature_names[col_idx]
if col_name in data_bunch.categories:
# XXX: This would be faster with np.take, although it does not
# handle missing values fast (also not with mode='wrap')
cat = data_bunch.categories[col_name]
result = [
None if is_scalar_nan(idx) else cat[int(idx)]
for idx in data_bunch.data[:, col_idx]
]
return np.array(result, dtype="O")
else:
# non-nominal attribute
return data_bunch.data[:, col_idx]
data_bunch = fetch_openml(
data_id=data_id, cache=False, target_column=None, as_frame=False
)
# also obtain decoded arff
data_description = _get_data_description_by_id(data_id, None)
sparse = data_description["format"].lower() == "sparse_arff"
if sparse is True:
raise ValueError(
"This test is not intended for sparse data, to keep code relatively simple"
)
url = _DATA_FILE.format(data_description["file_id"])
with _open_openml_url(url, data_home=None) as f:
data_arff = _arff.load(
(line.decode("utf-8") for line in f),
return_type=(_arff.COO if sparse else _arff.DENSE_GEN),
encode_nominal=False,
)
data_downloaded = np.array(list(data_arff["data"]), dtype="O")
for i in range(len(data_bunch.feature_names)):
# XXX: Test per column, as this makes it easier to avoid problems with
# missing values
np.testing.assert_array_equal(
data_downloaded[:, i], decode_column(data_bunch, i)
)
def _fetch_dataset_from_openml(
data_id,
data_name,
data_version,
target_column,
expected_observations,
expected_features,
expected_missing,
expected_data_dtype,
expected_target_dtype,
expect_sparse,
compare_default_target,
):
    # fetches a dataset in three different ways from OpenML, using the
# fetch_openml function, and does various checks on the validity of the
# result. Note that this function can be mocked (by invoking
# _monkey_patch_webbased_functions before invoking this function)
data_by_name_id = fetch_openml(
name=data_name, version=data_version, cache=False, as_frame=False
)
assert int(data_by_name_id.details["id"]) == data_id
# Please note that cache=False is crucial, as the monkey patched files are
# not consistent with reality
with warnings.catch_warnings():
# See discussion in PR #19373
# Catching UserWarnings about multiple versions of dataset
warnings.simplefilter("ignore", category=UserWarning)
fetch_openml(name=data_name, cache=False, as_frame=False)
# without specifying the version, there is no guarantee that the data id
# will be the same
# fetch with dataset id
data_by_id = fetch_openml(
data_id=data_id, cache=False, target_column=target_column, as_frame=False
)
assert data_by_id.details["name"] == data_name
assert data_by_id.data.shape == (expected_observations, expected_features)
if isinstance(target_column, str):
# single target, so target is vector
assert data_by_id.target.shape == (expected_observations,)
assert data_by_id.target_names == [target_column]
elif isinstance(target_column, list):
# multi target, so target is array
assert data_by_id.target.shape == (expected_observations, len(target_column))
assert data_by_id.target_names == target_column
assert data_by_id.data.dtype == expected_data_dtype
assert data_by_id.target.dtype == expected_target_dtype
assert len(data_by_id.feature_names) == expected_features
for feature in data_by_id.feature_names:
assert isinstance(feature, str)
# TODO: pass in a list of expected nominal features
for feature, categories in data_by_id.categories.items():
feature_idx = data_by_id.feature_names.index(feature)
# TODO: Remove when https://github.com/numpy/numpy/issues/19300 gets fixed
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=DeprecationWarning,
message="elementwise comparison failed",
)
values = np.unique(data_by_id.data[:, feature_idx])
values = values[np.isfinite(values)]
assert set(values) <= set(range(len(categories)))
if compare_default_target:
# check whether the data by id and data by id target are equal
data_by_id_default = fetch_openml(data_id=data_id, cache=False, as_frame=False)
np.testing.assert_allclose(data_by_id.data, data_by_id_default.data)
if data_by_id.target.dtype == np.float64:
np.testing.assert_allclose(data_by_id.target, data_by_id_default.target)
else:
assert np.array_equal(data_by_id.target, data_by_id_default.target)
if expect_sparse:
assert isinstance(data_by_id.data, scipy.sparse.csr_matrix)
else:
assert isinstance(data_by_id.data, np.ndarray)
# np.isnan doesn't work on CSR matrix
assert np.count_nonzero(np.isnan(data_by_id.data)) == expected_missing
# test return_X_y option
fetch_func = partial(
fetch_openml,
data_id=data_id,
cache=False,
target_column=target_column,
as_frame=False,
)
check_return_X_y(data_by_id, fetch_func)
return data_by_id
class _MockHTTPResponse:
def __init__(self, data, is_gzip):
self.data = data
self.is_gzip = is_gzip
def read(self, amt=-1):
return self.data.read(amt)
def close(self):
self.data.close()
def info(self):
if self.is_gzip:
return {"Content-Encoding": "gzip"}
return {}
def __iter__(self):
return iter(self.data)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
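# _MockHTTPResponse mimics only the subset of the urllib response interface that
# fetch_openml relies on (read/close/info/iteration/context manager). A minimal
# illustrative sketch of its behaviour (not part of the test suite):
#
#     from io import BytesIO
#     resp = _MockHTTPResponse(BytesIO(b'{"key": "value"}'), is_gzip=False)
#     with resp as r:
#         assert r.info() == {}                  # no gzip header advertised
#         assert r.read() == b'{"key": "value"}'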
def _monkey_patch_webbased_functions(context, data_id, gzip_response):
    # Monkey-patches the urlopen function. Important note: do NOT use this
    # in combination with a regular cache directory, because files stored in
    # the cache must not be mixed up with real OpenML datasets.
url_prefix_data_description = "https://openml.org/api/v1/json/data/"
url_prefix_data_features = "https://openml.org/api/v1/json/data/features/"
url_prefix_download_data = "https://openml.org/data/v1/"
url_prefix_data_list = "https://openml.org/api/v1/json/data/list/"
path_suffix = ".gz"
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + "." + f"id_{data_id}"
def _file_name(url, suffix):
output = (
re.sub(r"\W", "-", url[len("https://openml.org/") :]) + suffix + path_suffix
)
        # Shorten the file names for better compatibility with the
        # 260-character path limit on Windows 10.
return (
output.replace("-json-data-list", "-jdl")
.replace("-json-data-features", "-jdf")
.replace("-json-data-qualities", "-jdq")
.replace("-json-data", "-jd")
.replace("-data_name", "-dn")
.replace("-download", "-dl")
.replace("-limit", "-l")
.replace("-data_version", "-dv")
.replace("-status", "-s")
.replace("-deactivated", "-dact")
.replace("-active", "-act")
)
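    # Illustrative example of the shortening above (the URL is hypothetical):
    # a data-list query such as
    #     https://openml.org/api/v1/json/data/list/data_name/iris/limit/2
    # becomes roughly
    #     api-v1-jdl-dn-iris-l-2<suffix>.gz
    # i.e. non-word characters are replaced by "-" and the long path segments
    # are abbreviated so that the cached file name stays well under the
    # 260-character path limit.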
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with resources.open_binary(data_module, data_file_name) as f:
if has_gzip_header and gzip_response:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, "rb")
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(
url=url,
has_gzip_header=has_gzip_header,
expected_prefix=url_prefix_data_description,
suffix=".json",
)
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(
url=url,
has_gzip_header=has_gzip_header,
expected_prefix=url_prefix_data_features,
suffix=".json",
)
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(
url=url,
has_gzip_header=has_gzip_header,
expected_prefix=url_prefix_download_data,
suffix=".arff",
)
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, ".json")
        # Load the file first to check whether it reports an error, in which
        # case an HTTP error is simulated.
with resources.open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, "rb")
decoded_s = decompressed_f.read().decode("utf-8")
json_data = json.loads(decoded_s)
if "error" in json_data:
raise HTTPError(
url=None, code=412, msg="Simulated mock error", hdrs=None, fp=None
)
with resources.open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, "rb")
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header("Accept-encoding") == "gzip"
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError("Unknown mocking URL pattern: %s" % url)
# XXX: Global variable
if test_offline:
context.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen)
@pytest.mark.parametrize(
"feature, expected_dtype",
[
({"data_type": "string", "number_of_missing_values": "0"}, object),
({"data_type": "string", "number_of_missing_values": "1"}, object),
({"data_type": "numeric", "number_of_missing_values": "0"}, np.float64),
({"data_type": "numeric", "number_of_missing_values": "1"}, np.float64),
({"data_type": "real", "number_of_missing_values": "0"}, np.float64),
({"data_type": "real", "number_of_missing_values": "1"}, np.float64),
({"data_type": "integer", "number_of_missing_values": "0"}, np.int64),
({"data_type": "integer", "number_of_missing_values": "1"}, np.float64),
({"data_type": "nominal", "number_of_missing_values": "0"}, "category"),
({"data_type": "nominal", "number_of_missing_values": "1"}, "category"),
],
)
def test_feature_to_dtype(feature, expected_dtype):
assert _feature_to_dtype(feature) == expected_dtype
@pytest.mark.parametrize(
"feature", [{"data_type": "datatime", "number_of_missing_values": "0"}]
)
def test_feature_to_dtype_error(feature):
msg = "Unsupported feature: {}".format(feature)
with pytest.raises(ValueError, match=msg):
_feature_to_dtype(feature)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_iris_pandas(monkeypatch):
# classification dataset with numeric only columns
pd = pytest.importorskip("pandas")
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 61
data_shape = (150, 4)
target_shape = (150,)
frame_shape = (150, 5)
target_dtype = CategoricalDtype(
["Iris-setosa", "Iris-versicolor", "Iris-virginica"]
)
data_dtypes = [np.float64] * 4
data_names = ["sepallength", "sepalwidth", "petallength", "petalwidth"]
target_name = "class"
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert np.all(data.dtypes == data_dtypes)
assert data.shape == data_shape
assert np.all(data.columns == data_names)
assert np.all(bunch.feature_names == data_names)
assert bunch.target_names == [target_name]
assert isinstance(target, pd.Series)
assert target.dtype == target_dtype
assert target.shape == target_shape
assert target.name == target_name
assert target.index.is_unique
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
assert np.all(frame.dtypes == data_dtypes + [target_dtype])
assert frame.index.is_unique
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_iris_pandas_equal_to_no_frame(monkeypatch):
# as_frame = True returns the same underlying data as as_frame = False
pytest.importorskip("pandas")
data_id = 61
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
frame_bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
frame_data = frame_bunch.data
frame_target = frame_bunch.target
norm_bunch = fetch_openml(data_id=data_id, as_frame=False, cache=False)
norm_data = norm_bunch.data
norm_target = norm_bunch.target
assert_allclose(norm_data, frame_data)
assert_array_equal(norm_target, frame_target)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_iris_multitarget_pandas(monkeypatch):
# classification dataset with numeric only columns
pd = pytest.importorskip("pandas")
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 61
data_shape = (150, 3)
target_shape = (150, 2)
frame_shape = (150, 5)
target_column = ["petalwidth", "petallength"]
cat_dtype = CategoricalDtype(["Iris-setosa", "Iris-versicolor", "Iris-virginica"])
data_dtypes = [np.float64, np.float64] + [cat_dtype]
data_names = ["sepallength", "sepalwidth", "class"]
target_dtypes = [np.float64, np.float64]
target_names = ["petalwidth", "petallength"]
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(
data_id=data_id, as_frame=True, cache=False, target_column=target_column
)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert np.all(data.dtypes == data_dtypes)
assert data.shape == data_shape
assert np.all(data.columns == data_names)
assert np.all(bunch.feature_names == data_names)
assert bunch.target_names == target_names
assert isinstance(target, pd.DataFrame)
assert np.all(target.dtypes == target_dtypes)
assert target.shape == target_shape
assert np.all(target.columns == target_names)
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
assert np.all(frame.dtypes == [np.float64] * 4 + [cat_dtype])
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_anneal_pandas(monkeypatch):
# classification dataset with numeric and categorical columns
pd = pytest.importorskip("pandas")
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 2
target_column = "class"
data_shape = (11, 38)
target_shape = (11,)
frame_shape = (11, 39)
expected_data_categories = 32
expected_data_floats = 6
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(
data_id=data_id, as_frame=True, target_column=target_column, cache=False
)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert data.shape == data_shape
n_categories = len(
[dtype for dtype in data.dtypes if isinstance(dtype, CategoricalDtype)]
)
n_floats = len([dtype for dtype in data.dtypes if dtype.kind == "f"])
assert expected_data_categories == n_categories
assert expected_data_floats == n_floats
assert isinstance(target, pd.Series)
assert target.shape == target_shape
assert isinstance(target.dtype, CategoricalDtype)
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_cpu_pandas(monkeypatch):
# regression dataset with numeric and categorical columns
pd = pytest.importorskip("pandas")
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 561
data_shape = (209, 7)
target_shape = (209,)
frame_shape = (209, 8)
cat_dtype = CategoricalDtype(
[
"adviser",
"amdahl",
"apollo",
"basf",
"bti",
"burroughs",
"c.r.d",
"cdc",
"cambex",
"dec",
"dg",
"formation",
"four-phase",
"gould",
"hp",
"harris",
"honeywell",
"ibm",
"ipl",
"magnuson",
"microdata",
"nas",
"ncr",
"nixdorf",
"perkin-elmer",
"prime",
"siemens",
"sperry",
"sratus",
"wang",
]
)
data_dtypes = [cat_dtype] + [np.float64] * 6
feature_names = ["vendor", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX"]
target_name = "class"
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert data.shape == data_shape
assert np.all(data.dtypes == data_dtypes)
assert np.all(data.columns == feature_names)
assert np.all(bunch.feature_names == feature_names)
assert bunch.target_names == [target_name]
assert isinstance(target, pd.Series)
assert target.shape == target_shape
assert target.dtype == np.float64
assert target.name == target_name
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
def test_fetch_openml_australian_pandas_error_sparse(monkeypatch):
data_id = 292
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
msg = "Cannot return dataframe with sparse data"
with pytest.raises(ValueError, match=msg):
fetch_openml(data_id=data_id, as_frame=True, cache=False)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_as_frame_auto(monkeypatch):
pd = pytest.importorskip("pandas")
data_id = 61 # iris dataset version 1
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
data = fetch_openml(data_id=data_id, as_frame="auto", cache=False)
assert isinstance(data.data, pd.DataFrame)
data_id = 292 # Australian dataset version 1
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
data = fetch_openml(data_id=data_id, as_frame="auto", cache=False)
assert isinstance(data.data, scipy.sparse.csr_matrix)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_convert_arff_data_dataframe_warning_low_memory_pandas(monkeypatch):
pytest.importorskip("pandas")
data_id = 1119
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
msg = "Could not adhere to working_memory config."
with pytest.warns(UserWarning, match=msg):
with config_context(working_memory=1e-6):
fetch_openml(data_id=data_id, as_frame=True, cache=False)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_adultcensus_pandas_return_X_y(monkeypatch):
pd = pytest.importorskip("pandas")
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 1119
data_shape = (10, 14)
target_shape = (10,)
expected_data_categories = 8
expected_data_floats = 6
target_column = "class"
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
X, y = fetch_openml(data_id=data_id, as_frame=True, cache=False, return_X_y=True)
assert isinstance(X, pd.DataFrame)
assert X.shape == data_shape
n_categories = len(
[dtype for dtype in X.dtypes if isinstance(dtype, CategoricalDtype)]
)
n_floats = len([dtype for dtype in X.dtypes if dtype.kind == "f"])
assert expected_data_categories == n_categories
assert expected_data_floats == n_floats
assert isinstance(y, pd.Series)
assert y.shape == target_shape
assert y.name == target_column
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_adultcensus_pandas(monkeypatch):
pd = pytest.importorskip("pandas")
CategoricalDtype = pd.api.types.CategoricalDtype
# Check because of the numeric row attribute (issue #12329)
data_id = 1119
data_shape = (10, 14)
target_shape = (10,)
frame_shape = (10, 15)
expected_data_categories = 8
expected_data_floats = 6
target_column = "class"
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert data.shape == data_shape
n_categories = len(
[dtype for dtype in data.dtypes if isinstance(dtype, CategoricalDtype)]
)
n_floats = len([dtype for dtype in data.dtypes if dtype.kind == "f"])
assert expected_data_categories == n_categories
assert expected_data_floats == n_floats
assert isinstance(target, pd.Series)
assert target.shape == target_shape
assert target.name == target_column
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_miceprotein_pandas(monkeypatch):
    # JvR: very important check, as this dataset defines several row ids
    # and ignore attributes. Note that the data_features json has 82
    # attributes, of which the row id (1) and the ignore attributes (3) have
    # been removed.
pd = pytest.importorskip("pandas")
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 40966
data_shape = (7, 77)
target_shape = (7,)
frame_shape = (7, 78)
target_column = "class"
frame_n_categories = 1
frame_n_floats = 77
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert data.shape == data_shape
assert np.all(data.dtypes == np.float64)
assert isinstance(target, pd.Series)
assert isinstance(target.dtype, CategoricalDtype)
assert target.shape == target_shape
assert target.name == target_column
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
n_categories = len(
[dtype for dtype in frame.dtypes if isinstance(dtype, CategoricalDtype)]
)
n_floats = len([dtype for dtype in frame.dtypes if dtype.kind == "f"])
assert frame_n_categories == n_categories
assert frame_n_floats == n_floats
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_emotions_pandas(monkeypatch):
# classification dataset with multiple targets (natively)
pd = pytest.importorskip("pandas")
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 40589
target_column = [
"amazed.suprised",
"happy.pleased",
"relaxing.calm",
"quiet.still",
"sad.lonely",
"angry.aggresive",
]
data_shape = (13, 72)
target_shape = (13, 6)
frame_shape = (13, 78)
expected_frame_categories = 6
expected_frame_floats = 72
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(
data_id=data_id, as_frame=True, cache=False, target_column=target_column
)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert data.shape == data_shape
assert isinstance(target, pd.DataFrame)
assert target.shape == target_shape
assert np.all(target.columns == target_column)
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
n_categories = len(
[dtype for dtype in frame.dtypes if isinstance(dtype, CategoricalDtype)]
)
n_floats = len([dtype for dtype in frame.dtypes if dtype.kind == "f"])
assert expected_frame_categories == n_categories
assert expected_frame_floats == n_floats
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_titanic_pandas(monkeypatch):
# dataset with strings
pd = pytest.importorskip("pandas")
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 40945
data_shape = (1309, 13)
target_shape = (1309,)
frame_shape = (1309, 14)
name_to_dtype = {
"pclass": np.float64,
"name": object,
"sex": CategoricalDtype(["female", "male"]),
"age": np.float64,
"sibsp": np.float64,
"parch": np.float64,
"ticket": object,
"fare": np.float64,
"cabin": object,
"embarked": CategoricalDtype(["C", "Q", "S"]),
"boat": object,
"body": np.float64,
"home.dest": object,
"survived": CategoricalDtype(["0", "1"]),
}
frame_columns = [
"pclass",
"survived",
"name",
"sex",
"age",
"sibsp",
"parch",
"ticket",
"fare",
"cabin",
"embarked",
"boat",
"body",
"home.dest",
]
frame_dtypes = [name_to_dtype[col] for col in frame_columns]
feature_names = [
"pclass",
"name",
"sex",
"age",
"sibsp",
"parch",
"ticket",
"fare",
"cabin",
"embarked",
"boat",
"body",
"home.dest",
]
target_name = "survived"
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert data.shape == data_shape
assert np.all(data.columns == feature_names)
assert bunch.target_names == [target_name]
assert isinstance(target, pd.Series)
assert target.shape == target_shape
assert target.name == target_name
assert target.dtype == name_to_dtype[target_name]
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
assert np.all(frame.dtypes == frame_dtypes)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_iris(monkeypatch, gzip_response):
# classification dataset with numeric only columns
data_id = 61
data_name = "iris"
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
msg = (
"Multiple active versions of the dataset matching the name"
" iris exist. Versions may be fundamentally different, "
"returning version 1."
)
with pytest.warns(UserWarning, match=msg):
fetch_openml(name=data_name, as_frame=False, cache=False)
def test_decode_iris(monkeypatch):
data_id = 61
_monkey_patch_webbased_functions(monkeypatch, data_id, False)
_test_features_list(data_id)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_iris_multitarget(monkeypatch, gzip_response):
# classification dataset with numeric only columns
data_id = 61
data_name = "iris"
data_version = 1
target_column = ["sepallength", "sepalwidth"]
expected_observations = 150
expected_features = 3
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(
data_id,
data_name,
data_version,
target_column,
expected_observations,
expected_features,
expected_missing,
np.float64,
np.float64,
expect_sparse=False,
compare_default_target=False,
)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_anneal(monkeypatch, gzip_response):
# classification dataset with numeric and categorical columns
data_id = 2
data_name = "anneal"
data_version = 1
target_column = "class"
# Not all original instances included for space reasons
expected_observations = 11
expected_features = 38
expected_missing = 267
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(
data_id,
data_name,
data_version,
target_column,
expected_observations,
expected_features,
expected_missing,
np.float64,
object,
expect_sparse=False,
compare_default_target=True,
)
def test_decode_anneal(monkeypatch):
data_id = 2
_monkey_patch_webbased_functions(monkeypatch, data_id, False)
_test_features_list(data_id)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_anneal_multitarget(monkeypatch, gzip_response):
# classification dataset with numeric and categorical columns
data_id = 2
data_name = "anneal"
data_version = 1
target_column = ["class", "product-type", "shape"]
# Not all original instances included for space reasons
expected_observations = 11
expected_features = 36
expected_missing = 267
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(
data_id,
data_name,
data_version,
target_column,
expected_observations,
expected_features,
expected_missing,
np.float64,
object,
expect_sparse=False,
compare_default_target=False,
)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_cpu(monkeypatch, gzip_response):
# regression dataset with numeric and categorical columns
data_id = 561
data_name = "cpu"
data_version = 1
target_column = "class"
expected_observations = 209
expected_features = 7
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(
data_id,
data_name,
data_version,
target_column,
expected_observations,
expected_features,
expected_missing,
np.float64,
np.float64,
expect_sparse=False,
compare_default_target=True,
)
def test_decode_cpu(monkeypatch):
data_id = 561
_monkey_patch_webbased_functions(monkeypatch, data_id, False)
_test_features_list(data_id)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_australian(monkeypatch, gzip_response):
# sparse dataset
    # Australian is the only sparse dataset that is reasonably small.
    # Because it is inactive, we need to catch the warning. Due to the
    # mocking framework, it is not deactivated in our tests.
data_id = 292
data_name = "Australian"
data_version = 1
target_column = "Y"
# Not all original instances included for space reasons
expected_observations = 85
expected_features = 14
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
msg = "Version 1 of dataset Australian is inactive,"
with pytest.warns(UserWarning, match=msg):
_fetch_dataset_from_openml(
**{
"data_id": data_id,
"data_name": data_name,
"data_version": data_version,
"target_column": target_column,
"expected_observations": expected_observations,
"expected_features": expected_features,
"expected_missing": expected_missing,
"expect_sparse": True,
"expected_data_dtype": np.float64,
"expected_target_dtype": object,
"compare_default_target": False,
} # numpy specific check
)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_adultcensus(monkeypatch, gzip_response):
# Check because of the numeric row attribute (issue #12329)
data_id = 1119
data_name = "adult-census"
data_version = 1
target_column = "class"
# Not all original instances included for space reasons
expected_observations = 10
expected_features = 14
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(
data_id,
data_name,
data_version,
target_column,
expected_observations,
expected_features,
expected_missing,
np.float64,
object,
expect_sparse=False,
compare_default_target=True,
)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_miceprotein(monkeypatch, gzip_response):
    # JvR: very important check, as this dataset defines several row ids
    # and ignore attributes. Note that the data_features json has 82
    # attributes, of which the row id (1) and the ignore attributes (3) have
    # been removed (and the target is stored in data.target).
data_id = 40966
data_name = "MiceProtein"
data_version = 4
target_column = "class"
# Not all original instances included for space reasons
expected_observations = 7
expected_features = 77
expected_missing = 7
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(
data_id,
data_name,
data_version,
target_column,
expected_observations,
expected_features,
expected_missing,
np.float64,
object,
expect_sparse=False,
compare_default_target=True,
)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_emotions(monkeypatch, gzip_response):
# classification dataset with multiple targets (natively)
data_id = 40589
data_name = "emotions"
data_version = 3
target_column = [
"amazed.suprised",
"happy.pleased",
"relaxing.calm",
"quiet.still",
"sad.lonely",
"angry.aggresive",
]
expected_observations = 13
expected_features = 72
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(
data_id,
data_name,
data_version,
target_column,
expected_observations,
expected_features,
expected_missing,
np.float64,
object,
expect_sparse=False,
compare_default_target=True,
)
def test_decode_emotions(monkeypatch):
data_id = 40589
_monkey_patch_webbased_functions(monkeypatch, data_id, False)
_test_features_list(data_id)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_open_openml_url_cache(monkeypatch, gzip_response, tmpdir):
data_id = 61
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
# first fill the cache
response1 = _open_openml_url(openml_path, cache_directory)
# assert file exists
location = _get_local_path(openml_path, cache_directory)
assert os.path.isfile(location)
# redownload, to utilize cache
response2 = _open_openml_url(openml_path, cache_directory)
assert response1.read() == response2.read()
@pytest.mark.parametrize("gzip_response", [True, False])
@pytest.mark.parametrize("write_to_disk", [True, False])
def test_open_openml_url_unlinks_local_path(
monkeypatch, gzip_response, tmpdir, write_to_disk
):
data_id = 61
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
location = _get_local_path(openml_path, cache_directory)
def _mock_urlopen(request, *args, **kwargs):
if write_to_disk:
with open(location, "w") as f:
f.write("")
raise ValueError("Invalid request")
monkeypatch.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen)
with pytest.raises(ValueError, match="Invalid request"):
_open_openml_url(openml_path, cache_directory)
assert not os.path.exists(location)
def test_retry_with_clean_cache(tmpdir):
data_id = 61
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
location = _get_local_path(openml_path, cache_directory)
os.makedirs(os.path.dirname(location))
with open(location, "w") as f:
f.write("")
@_retry_with_clean_cache(openml_path, cache_directory)
def _load_data():
# The first call will raise an error since location exists
if os.path.exists(location):
            raise Exception("File exists!")
return 1
warn_msg = "Invalid cache, redownloading file"
with pytest.warns(RuntimeWarning, match=warn_msg):
result = _load_data()
assert result == 1
def test_retry_with_clean_cache_http_error(tmpdir):
data_id = 61
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
@_retry_with_clean_cache(openml_path, cache_directory)
def _load_data():
raise HTTPError(
url=None, code=412, msg="Simulated mock error", hdrs=None, fp=None
)
error_msg = "Simulated mock error"
with pytest.raises(HTTPError, match=error_msg):
_load_data()
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_cache(monkeypatch, gzip_response, tmpdir):
def _mock_urlopen_raise(request, *args, **kwargs):
raise ValueError(
"This mechanism intends to test correct cache"
"handling. As such, urlopen should never be "
"accessed. URL: %s"
% request.get_full_url()
)
data_id = 2
cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
X_fetched, y_fetched = fetch_openml(
data_id=data_id,
cache=True,
data_home=cache_directory,
return_X_y=True,
as_frame=False,
)
monkeypatch.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen_raise)
X_cached, y_cached = fetch_openml(
data_id=data_id,
cache=True,
data_home=cache_directory,
return_X_y=True,
as_frame=False,
)
np.testing.assert_array_equal(X_fetched, X_cached)
np.testing.assert_array_equal(y_fetched, y_cached)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_notarget(monkeypatch, gzip_response):
data_id = 61
target_column = None
expected_observations = 150
expected_features = 5
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
data = fetch_openml(
data_id=data_id, target_column=target_column, cache=False, as_frame=False
)
assert data.data.shape == (expected_observations, expected_features)
assert data.target is None
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_inactive(monkeypatch, gzip_response):
# fetch inactive dataset by id
data_id = 40675
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
msg = "Version 1 of dataset glass2 is inactive,"
with pytest.warns(UserWarning, match=msg):
glas2 = fetch_openml(data_id=data_id, cache=False, as_frame=False)
# fetch inactive dataset by name and version
assert glas2.data.shape == (163, 9)
with pytest.warns(UserWarning, match=msg):
glas2_by_version = fetch_openml(
data_id=None, name="glass2", cache=False, version=1, as_frame=False
)
assert int(glas2_by_version.details["id"]) == data_id
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_nonexisting(monkeypatch, gzip_response):
# there is no active version of glass2
data_id = 40675
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
# Note that we only want to search by name (not data id)
msg = "No active dataset glass2 found"
with pytest.raises(ValueError, match=msg):
fetch_openml(name="glass2", cache=False)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_raises_illegal_multitarget(monkeypatch, gzip_response):
data_id = 61
targets = ["sepalwidth", "class"]
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # a target mixing numeric and categorical columns is not supported
msg = "Can only handle homogeneous multi-target datasets,"
with pytest.raises(ValueError, match=msg):
fetch_openml(data_id=data_id, target_column=targets, cache=False)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_warn_ignore_attribute(monkeypatch, gzip_response):
data_id = 40966
expected_row_id_msg = "target_column={} has flag is_row_identifier."
expected_ignore_msg = "target_column={} has flag is_ignore."
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
# single column test
target_col = "MouseID"
msg = expected_row_id_msg.format(target_col)
with pytest.warns(UserWarning, match=msg):
fetch_openml(
data_id=data_id, target_column=target_col, cache=False, as_frame=False
)
target_col = "Genotype"
msg = expected_ignore_msg.format(target_col)
with pytest.warns(UserWarning, match=msg):
fetch_openml(
data_id=data_id, target_column=target_col, cache=False, as_frame=False
)
# multi column test
target_col = "MouseID"
msg = expected_row_id_msg.format(target_col)
with pytest.warns(UserWarning, match=msg):
fetch_openml(
data_id=data_id,
target_column=[target_col, "class"],
cache=False,
as_frame=False,
)
target_col = "Genotype"
msg = expected_ignore_msg.format(target_col)
with pytest.warns(UserWarning, match=msg):
fetch_openml(
data_id=data_id,
target_column=[target_col, "class"],
cache=False,
as_frame=False,
)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_string_attribute_without_dataframe(monkeypatch, gzip_response):
data_id = 40945
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
# single column test
msg = (
"STRING attributes are not supported for "
"array representation. Try as_frame=True"
)
with pytest.raises(ValueError, match=msg):
fetch_openml(data_id=data_id, cache=False, as_frame=False)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_dataset_with_openml_error(monkeypatch, gzip_response):
data_id = 1
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
msg = "OpenML registered a problem with the dataset. It might be unusable. Error:"
with pytest.warns(UserWarning, match=msg):
fetch_openml(data_id=data_id, cache=False, as_frame=False)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_dataset_with_openml_warning(monkeypatch, gzip_response):
data_id = 3
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
msg = "OpenML raised a warning on the dataset. It might be unusable. Warning:"
with pytest.warns(UserWarning, match=msg):
fetch_openml(data_id=data_id, cache=False, as_frame=False)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_illegal_column(monkeypatch, gzip_response):
data_id = 61
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
msg = "Could not find target_column="
with pytest.raises(KeyError, match=msg):
fetch_openml(data_id=data_id, target_column="undefined", cache=False)
with pytest.raises(KeyError, match=msg):
fetch_openml(data_id=data_id, target_column=["undefined", "class"], cache=False)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_raises_missing_values_target(monkeypatch, gzip_response):
data_id = 2
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
msg = "Target column "
with pytest.raises(ValueError, match=msg):
fetch_openml(data_id=data_id, target_column="family")
def test_fetch_openml_raises_illegal_argument():
msg = "Dataset data_id=-1 and version=version passed, but you can only"
with pytest.raises(ValueError, match=msg):
fetch_openml(data_id=-1, name=None, version="version")
msg = "Dataset data_id=-1 and name=name passed, but you can only"
with pytest.raises(ValueError, match=msg):
fetch_openml(data_id=-1, name="nAmE")
with pytest.raises(ValueError, match=msg):
fetch_openml(data_id=-1, name="nAmE", version="version")
msg = "Neither name nor data_id are provided. Please provide name or data_id."
with pytest.raises(ValueError, match=msg):
fetch_openml()
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_with_ignored_feature(monkeypatch, gzip_response):
# Regression test for #14340
# 62 is the ID of the ZOO dataset
data_id = 62
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
dataset = sklearn.datasets.fetch_openml(
data_id=data_id, cache=False, as_frame=False
)
assert dataset is not None
# The dataset has 17 features, including 1 ignored (animal),
# so we assert that we don't have the ignored feature in the final Bunch
assert dataset["data"].shape == (101, 16)
assert "animal" not in dataset["feature_names"]
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
@pytest.mark.parametrize("as_frame", [True, False])
def test_fetch_openml_verify_checksum(monkeypatch, as_frame, cache, tmpdir):
if as_frame:
pytest.importorskip("pandas")
data_id = 2
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
# create a temporary modified arff file
original_data_module = OPENML_TEST_DATA_MODULE + "." + f"id_{data_id}"
original_data_file_name = "data-v1-dl-1666876.arff.gz"
corrupt_copy_path = tmpdir / "test_invalid_checksum.arff"
with resources.open_binary(
original_data_module, original_data_file_name
) as orig_file:
orig_gzip = gzip.open(orig_file, "rb")
data = bytearray(orig_gzip.read())
data[len(data) - 1] = 37
with gzip.GzipFile(corrupt_copy_path, "wb") as modified_gzip:
modified_gzip.write(data)
# Requests are already mocked by monkey_patch_webbased_functions.
# We want to re-use that mock for all requests except file download,
# hence creating a thin mock over the original mock
mocked_openml_url = sklearn.datasets._openml.urlopen
def swap_file_mock(request, *args, **kwargs):
url = request.get_full_url()
if url.endswith("data/v1/download/1666876"):
with open(corrupt_copy_path, "rb") as f:
corrupted_data = f.read()
return _MockHTTPResponse(BytesIO(corrupted_data), is_gzip=True)
else:
return mocked_openml_url(request)
monkeypatch.setattr(sklearn.datasets._openml, "urlopen", swap_file_mock)
# validate failed checksum
with pytest.raises(ValueError) as exc:
sklearn.datasets.fetch_openml(data_id=data_id, cache=False, as_frame=as_frame)
    # the exception message should contain the file path
assert exc.match("1666876")
def test_convert_arff_data_type():
pytest.importorskip("pandas")
arff: ArffContainerType = {
"data": (el for el in range(2)),
"description": "",
"relation": "",
"attributes": [],
}
msg = r"shape must be provided when arr\['data'\] is a Generator"
with pytest.raises(ValueError, match=msg):
_convert_arff_data(arff, [0], [0], shape=None)
arff = {"data": list(range(2)), "description": "", "relation": "", "attributes": []}
msg = r"arff\['data'\] must be a generator when converting to pd.DataFrame"
with pytest.raises(ValueError, match=msg):
_convert_arff_data_dataframe(arff, ["a"], {})
def test_missing_values_pandas(monkeypatch):
"""check that missing values in categories are compatible with pandas
categorical"""
pytest.importorskip("pandas")
data_id = 42585
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
penguins = fetch_openml(data_id=data_id, cache=False, as_frame=True)
cat_dtype = penguins.data.dtypes["sex"]
# there are nans in the categorical
assert penguins.data["sex"].isna().any()
assert_array_equal(cat_dtype.categories, ["FEMALE", "MALE", "_"])
def test_open_openml_url_retry_on_network_error(monkeypatch):
def _mock_urlopen_network_error(request, *args, **kwargs):
raise HTTPError("", 404, "Simulated network error", None, None)
monkeypatch.setattr(
sklearn.datasets._openml, "urlopen", _mock_urlopen_network_error
)
invalid_openml_url = "invalid-url"
with pytest.warns(
UserWarning,
match=re.escape(
"A network error occured while downloading"
f" {_OPENML_PREFIX + invalid_openml_url}. Retrying..."
),
) as record:
with pytest.raises(HTTPError, match="Simulated network error"):
_open_openml_url(invalid_openml_url, None, delay=0)
assert len(record) == 3
| manhhomienbienthuy/scikit-learn | sklearn/datasets/tests/test_openml.py | Python | bsd-3-clause | 53,604 |
from glob import glob
import os
try:
from skimage.io import imread as sk_imread
except ImportError:
pass
from .core import Array
from ..base import tokenize
def add_leading_dimension(x):
return x[None, ...]
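# add_leading_dimension turns, for example, an image of shape (512, 512, 3)
# into shape (1, 512, 512, 3), so that the per-file chunks built below can be
# stacked along a new leading axis.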
def imread(filename, imread=None, preprocess=None):
""" Read a stack of images into a dask array
Parameters
----------
filename: string
A globstring like 'myfile.*.png'
imread: function (optional)
Optionally provide custom imread function.
Function should expect a filename and produce a numpy array.
Defaults to ``skimage.io.imread``.
preprocess: function (optional)
Optionally provide custom function to preprocess the image.
Function should expect a numpy array for a single image.
Example
-------
>>> from dask.array.image import imread
>>> im = imread('2015-*-*.png') # doctest: +SKIP
>>> im.shape # doctest: +SKIP
(365, 1000, 1000, 3)
Returns
-------
Dask array of all images stacked along the first dimension. All images
will be treated as individual chunks
"""
imread = imread or sk_imread
filenames = sorted(glob(filename))
if not filenames:
raise ValueError("No files found under name %s" % filename)
    name = 'imread-%s' % tokenize(
        filenames, [os.path.getmtime(fn) for fn in filenames])
sample = imread(filenames[0])
if preprocess:
sample = preprocess(sample)
keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))]
if preprocess:
values = [(add_leading_dimension, (preprocess, (imread, filename)))
for filename in filenames]
else:
values = [(add_leading_dimension, (imread, filename))
for filename in filenames]
dsk = dict(zip(keys, values))
chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)
return Array(dsk, name, chunks, sample.dtype)
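# Illustrative usage sketch (assumes files matching 'frames.*.png' exist and an
# imread backend such as scikit-image is installed; not part of the original
# module):
#
#     >>> from dask.array.image import imread                   # doctest: +SKIP
#     >>> stack = imread('frames.*.png',
#     ...                preprocess=lambda im: im / 255.0)       # doctest: +SKIP
#     >>> stack.shape                                            # doctest: +SKIP
#     (n_files, height, width, 3)
#
# Each file becomes a single chunk along axis 0, so the preprocess function is
# applied lazily, one image at a time, when the chunks are computed.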
| PhE/dask | dask/array/image.py | Python | bsd-3-clause | 1,949 |
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import pyglet
from pyglet.libs.darwin import carbon, _oscheck, create_cfstring
from pyglet.libs.darwin.constants import *
from base import Device, Control, AbsoluteAxis, RelativeAxis, Button
from base import Joystick, AppleRemote
from base import DeviceExclusiveException
# non-broken c_void_p
void_p = ctypes.POINTER(ctypes.c_int)
class CFUUIDBytes(ctypes.Structure):
_fields_ = [('byte%d' % i, ctypes.c_uint8) for i in range(16)]
mach_port_t = void_p
io_iterator_t = void_p
kern_return_t = ctypes.c_int
IOReturn = ctypes.c_uint
CFDictionaryRef = void_p
CFMutableDictionaryRef = void_p
CFArrayRef = void_p
CFStringRef = void_p
CFUUIDRef = ctypes.POINTER(CFUUIDBytes)
AbsoluteTime = ctypes.c_double
HRESULT = ctypes.c_int
REFIID = CFUUIDBytes
IOHIDElementType = ctypes.c_int
kIOHIDElementTypeInput_Misc = 1
kIOHIDElementTypeInput_Button = 2
kIOHIDElementTypeInput_Axis = 3
kIOHIDElementTypeInput_ScanCodes = 4
kIOHIDElementTypeOutput = 129
kIOHIDElementTypeFeature = 257
kIOHIDElementTypeCollection = 513
IOHIDElementCookie = ctypes.c_void_p
# Full list in IOHIDUsageTables.h
kHIDPage_GenericDesktop = 0x01
kHIDUsage_GD_Joystick = 0x04
kHIDUsage_GD_GamePad = 0x05
kHIDUsage_GD_Keyboard = 0x06
kHIDUsage_GD_Keypad = 0x07
kHIDUsage_GD_MultiAxisController = 0x08
kHIDUsage_GD_SystemAppMenu = 0x86
kHIDUsage_GD_SystemMenu = 0x89
kHIDUsage_GD_SystemMenuRight = 0x8A
kHIDUsage_GD_SystemMenuLeft = 0x8B
kHIDUsage_GD_SystemMenuUp = 0x8C
kHIDUsage_GD_SystemMenuDown = 0x8D
MACH_PORT_NULL = 0
kIOHIDDeviceKey = "IOHIDDevice"
kIOServicePlane = "IOService"
kIOHIDProductIDKey = "ProductID"
kCFNumberIntType = 9
kIOHIDOptionsTypeSeizeDevice = 1
kIOReturnExclusiveAccess = 0xe00002c5
carbon.CFUUIDGetConstantUUIDWithBytes.restype = CFUUIDRef
kIOHIDDeviceUserClientTypeID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0xFA, 0x12, 0xFA, 0x38, 0x6F, 0x1A, 0x11, 0xD4,
0xBA, 0x0C, 0x00, 0x05, 0x02, 0x8F, 0x18, 0xD5)
kIOCFPlugInInterfaceID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0xC2, 0x44, 0xE8, 0x58, 0x10, 0x9C, 0x11, 0xD4,
0x91, 0xD4, 0x00, 0x50, 0xE4, 0xC6, 0x42, 0x6F)
kIOHIDDeviceInterfaceID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0x78, 0xBD, 0x42, 0x0C, 0x6F, 0x14, 0x11, 0xD4,
0x94, 0x74, 0x00, 0x05, 0x02, 0x8F, 0x18, 0xD5)
IOHIDCallbackFunction = ctypes.CFUNCTYPE(None,
void_p, IOReturn, ctypes.c_void_p, ctypes.c_void_p)
CFRunLoopSourceRef = ctypes.c_void_p
class IOHIDEventStruct(ctypes.Structure):
_fields_ = (
('type', IOHIDElementType),
('elementCookie', IOHIDElementCookie),
('value', ctypes.c_int32),
('timestamp', AbsoluteTime),
('longValueSize', ctypes.c_uint32),
('longValue', ctypes.c_void_p)
)
Self = ctypes.c_void_p
class IUnknown(ctypes.Structure):
_fields_ = (
('_reserved', ctypes.c_void_p),
('QueryInterface',
ctypes.CFUNCTYPE(HRESULT, Self, REFIID, ctypes.c_void_p)),
('AddRef',
ctypes.CFUNCTYPE(ctypes.c_ulong, Self)),
('Release',
ctypes.CFUNCTYPE(ctypes.c_ulong, Self)),
)
# Most of these function prototypes are not filled in yet because I haven't
# bothered.
class IOHIDQueueInterface(ctypes.Structure):
_fields_ = IUnknown._fields_ + (
('createAsyncEventSource', ctypes.CFUNCTYPE(IOReturn,
Self, ctypes.POINTER(CFRunLoopSourceRef))),
('getAsyncEventSource', ctypes.c_void_p),
('createAsyncPort', ctypes.c_void_p),
('getAsyncPort', ctypes.c_void_p),
('create', ctypes.CFUNCTYPE(IOReturn,
Self, ctypes.c_uint32, ctypes.c_uint32)),
('dispose', ctypes.CFUNCTYPE(IOReturn,
Self)),
('addElement', ctypes.CFUNCTYPE(IOReturn,
Self, IOHIDElementCookie)),
('removeElement', ctypes.c_void_p),
('hasElement', ctypes.c_void_p),
('start', ctypes.CFUNCTYPE(IOReturn,
Self)),
('stop', ctypes.CFUNCTYPE(IOReturn,
Self)),
('getNextEvent', ctypes.CFUNCTYPE(IOReturn,
Self, ctypes.POINTER(IOHIDEventStruct), AbsoluteTime,
ctypes.c_uint32)),
('setEventCallout', ctypes.CFUNCTYPE(IOReturn,
Self, IOHIDCallbackFunction, ctypes.c_void_p, ctypes.c_void_p)),
('getEventCallout', ctypes.c_void_p),
)
class IOHIDDeviceInterface(ctypes.Structure):
_fields_ = IUnknown._fields_ + (
('createAsyncEventSource', ctypes.c_void_p),
('getAsyncEventSource', ctypes.c_void_p),
('createAsyncPort', ctypes.c_void_p),
('getAsyncPort', ctypes.c_void_p),
('open', ctypes.CFUNCTYPE(IOReturn,
Self, ctypes.c_uint32)),
('close', ctypes.CFUNCTYPE(IOReturn,
Self)),
('setRemovalCallback', ctypes.c_void_p),
('getElementValue', ctypes.CFUNCTYPE(IOReturn,
Self, IOHIDElementCookie, ctypes.POINTER(IOHIDEventStruct))),
('setElementValue', ctypes.c_void_p),
('queryElementValue', ctypes.c_void_p),
('startAllQueues', ctypes.c_void_p),
('stopAllQueues', ctypes.c_void_p),
('allocQueue', ctypes.CFUNCTYPE(
ctypes.POINTER(ctypes.POINTER(IOHIDQueueInterface)),
Self)),
('allocOutputTransaction', ctypes.c_void_p),
# 1.2.1 (10.2.3)
('setReport', ctypes.c_void_p),
('getReport', ctypes.c_void_p),
# 1.2.2 (10.3)
('copyMatchingElements', ctypes.CFUNCTYPE(IOReturn,
Self, CFDictionaryRef, ctypes.POINTER(CFArrayRef))),
('setInterruptReportHandlerCallback', ctypes.c_void_p),
)
def get_master_port():
master_port = mach_port_t()
_oscheck(
carbon.IOMasterPort(MACH_PORT_NULL, ctypes.byref(master_port))
)
return master_port
def get_matching_dictionary():
carbon.IOServiceMatching.restype = CFMutableDictionaryRef
matching_dictionary = carbon.IOServiceMatching(kIOHIDDeviceKey)
return matching_dictionary
def get_matching_services(master_port, matching_dictionary):
# Consumes reference to matching_dictionary
iterator = io_iterator_t()
_oscheck(
carbon.IOServiceGetMatchingServices(master_port,
matching_dictionary,
ctypes.byref(iterator))
)
services = []
while carbon.IOIteratorIsValid(iterator):
service = carbon.IOIteratorNext(iterator)
if not service:
break
services.append(service)
carbon.IOObjectRelease(iterator)
return services
def cfstring_to_string(value_string):
value_length = carbon.CFStringGetLength(value_string)
buffer_length = carbon.CFStringGetMaximumSizeForEncoding(
value_length, kCFStringEncodingUTF8)
buffer = ctypes.c_buffer(buffer_length + 1)
result = carbon.CFStringGetCString(value_string,
buffer,
len(buffer),
kCFStringEncodingUTF8)
if not result:
return
return buffer.value
def cfnumber_to_int(value):
result = ctypes.c_int()
carbon.CFNumberGetValue(value, kCFNumberIntType, ctypes.byref(result))
return result.value
def cfboolean_to_bool(value):
return bool(carbon.CFBooleanGetValue(value))
def cfvalue_to_value(value):
if not value:
return None
value_type = carbon.CFGetTypeID(value)
if value_type == carbon.CFStringGetTypeID():
return cfstring_to_string(value)
elif value_type == carbon.CFNumberGetTypeID():
return cfnumber_to_int(value)
elif value_type == carbon.CFBooleanGetTypeID():
return cfboolean_to_bool(value)
else:
return None
def get_property_value(properties, key):
key_string = create_cfstring(key)
value = ctypes.c_void_p()
present = carbon.CFDictionaryGetValueIfPresent(properties,
key_string,
ctypes.byref(value))
carbon.CFRelease(key_string)
if not present:
return None
return value
def get_property(properties, key):
return cfvalue_to_value(get_property_value(properties, key))
def dump_properties(properties):
def func(key, value, context):
print '%s = %s' % (cfstring_to_string(key), cfvalue_to_value(value))
CFDictionaryApplierFunction = ctypes.CFUNCTYPE(None,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
carbon.CFDictionaryApplyFunction(properties,
CFDictionaryApplierFunction(func), None)
class DarwinHIDDevice(Device):
'''
:IVariables:
`name` : str
`manufacturer` : str
'''
def __init__(self, display, generic_device):
super(DarwinHIDDevice, self).__init__(display, name=None)
self._device = self._get_device_interface(generic_device)
properties = CFMutableDictionaryRef()
_oscheck(
carbon.IORegistryEntryCreateCFProperties(generic_device,
ctypes.byref(properties),
None, 0)
)
self.name = get_property(properties, "Product")
self.manufacturer = get_property(properties, "Manufacturer")
self.usage_page = get_property(properties, 'PrimaryUsagePage')
self.usage = get_property(properties, 'PrimaryUsage')
carbon.CFRelease(properties)
self._controls = self._init_controls()
self._open = False
self._queue = None
self._queue_depth = 8 # Number of events queue can buffer
def _get_device_interface(self, generic_device):
plug_in_interface = \
ctypes.POINTER(ctypes.POINTER(IUnknown))()
score = ctypes.c_int32()
_oscheck(
carbon.IOCreatePlugInInterfaceForService(
generic_device,
kIOHIDDeviceUserClientTypeID,
kIOCFPlugInInterfaceID,
ctypes.byref(plug_in_interface),
ctypes.byref(score))
)
carbon.CFUUIDGetUUIDBytes.restype = CFUUIDBytes
hid_device_interface = \
ctypes.POINTER(ctypes.POINTER(IOHIDDeviceInterface))()
_oscheck(
plug_in_interface.contents.contents.QueryInterface(
plug_in_interface,
carbon.CFUUIDGetUUIDBytes(kIOHIDDeviceInterfaceID),
ctypes.byref(hid_device_interface))
)
plug_in_interface.contents.contents.Release(plug_in_interface)
return hid_device_interface
def _init_controls(self):
elements_array = CFArrayRef()
_oscheck(
self._device.contents.contents.copyMatchingElements(self._device,
None, ctypes.byref(elements_array))
)
self._control_cookies = {}
controls = []
n_elements = carbon.CFArrayGetCount(elements_array)
for i in range(n_elements):
properties = carbon.CFArrayGetValueAtIndex(elements_array, i)
control = _create_control(properties)
if control:
controls.append(control)
self._control_cookies[control._cookie] = control
carbon.CFRelease(elements_array)
return controls
def open(self, window=None, exclusive=False):
super(DarwinHIDDevice, self).open(window, exclusive)
flags = 0
if exclusive:
flags |= kIOHIDOptionsTypeSeizeDevice
result = self._device.contents.contents.open(self._device, flags)
if result == 0:
self._open = True
elif result == kIOReturnExclusiveAccess:
raise DeviceExclusiveException()
# Create event queue
self._queue = self._device.contents.contents.allocQueue(self._device)
_oscheck(
self._queue.contents.contents.create(self._queue,
0, self._queue_depth)
)
# Add all controls into queue
for control in self._controls:
r = self._queue.contents.contents.addElement(self._queue,
control._cookie, 0)
if r != 0:
print 'error adding %r' % control
self._event_source = CFRunLoopSourceRef()
self._queue_callback_func = IOHIDCallbackFunction(self._queue_callback)
_oscheck(
self._queue.contents.contents.createAsyncEventSource(self._queue,
ctypes.byref(self._event_source))
)
_oscheck(
self._queue.contents.contents.setEventCallout(self._queue,
self._queue_callback_func, None, None)
)
event_loop = pyglet.app.platform_event_loop._event_loop
carbon.GetCFRunLoopFromEventLoop.restype = void_p
run_loop = carbon.GetCFRunLoopFromEventLoop(event_loop)
kCFRunLoopDefaultMode = \
CFStringRef.in_dll(carbon, 'kCFRunLoopDefaultMode')
carbon.CFRunLoopAddSource(run_loop,
self._event_source,
kCFRunLoopDefaultMode)
_oscheck(
self._queue.contents.contents.start(self._queue)
)
def close(self):
super(DarwinHIDDevice, self).close()
if not self._open:
return
_oscheck(
self._queue.contents.contents.stop(self._queue)
)
_oscheck(
self._queue.contents.contents.dispose(self._queue)
)
self._queue.contents.contents.Release(self._queue)
self._queue = None
_oscheck(
self._device.contents.contents.close(self._device)
)
self._open = False
def get_controls(self):
return self._controls
def _queue_callback(self, target, result, refcon, sender):
if not self._open:
return
event = IOHIDEventStruct()
r = self._queue.contents.contents.getNextEvent(self._queue,
ctypes.byref(event), 0, 0)
while r == 0:
try:
control = self._control_cookies[event.elementCookie]
control._set_value(event.value)
except KeyError:
pass
r = self._queue.contents.contents.getNextEvent(self._queue,
ctypes.byref(event), 0, 0)
_axis_names = {
(0x01, 0x30): 'x',
(0x01, 0x31): 'y',
(0x01, 0x32): 'z',
(0x01, 0x33): 'rx',
(0x01, 0x34): 'ry',
(0x01, 0x35): 'rz',
(0x01, 0x38): 'wheel',
(0x01, 0x39): 'hat',
}
_button_names = {
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemAppMenu): 'menu',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemMenu): 'select',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemMenuRight): 'right',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemMenuLeft): 'left',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemMenuUp): 'up',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemMenuDown): 'down',
}
def _create_control(properties):
type = get_property(properties, 'Type')
if type not in (kIOHIDElementTypeInput_Misc,
kIOHIDElementTypeInput_Axis,
kIOHIDElementTypeInput_Button):
return
cookie = get_property(properties, 'ElementCookie')
usage_page = get_property(properties, 'UsagePage')
usage = get_property(properties, 'Usage')
raw_name = get_property(properties, 'Name')
if not raw_name:
raw_name = '%d:%d' % (usage_page, usage)
if type in (kIOHIDElementTypeInput_Misc, kIOHIDElementTypeInput_Axis):
name = _axis_names.get((usage_page, usage))
relative = get_property(properties, 'IsRelative')
if relative:
control = RelativeAxis(name, raw_name)
else:
min = get_property(properties, 'Min')
max = get_property(properties, 'Max')
control = AbsoluteAxis(name, min, max, raw_name)
elif type == kIOHIDElementTypeInput_Button:
name = _button_names.get((usage_page, usage))
control = Button(name, raw_name)
else:
return
control._cookie = cookie
return control
def _create_joystick(device):
    # Ignore generic desktop devices that are not joysticks, gamepads or
    # multi-axis controllers
if device.usage_page == kHIDPage_GenericDesktop and \
device.usage not in (kHIDUsage_GD_Joystick,
kHIDUsage_GD_GamePad,
kHIDUsage_GD_MultiAxisController):
return
    # Assume anything else is interesting enough to be a joystick
return Joystick(device)
def get_devices(display=None):
services = get_matching_services(get_master_port(),
get_matching_dictionary())
return [DarwinHIDDevice(display, service) for service in services]
def get_joysticks(display=None):
return filter(None,
[_create_joystick(device) for device in get_devices(display)])
def get_apple_remote(display=None):
for device in get_devices(display):
if device.name == 'Apple IR':
return AppleRemote(device)
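# Illustrative usage sketch (Python 2, requires macOS with at least one HID
# device attached; not part of the original module):
#
#     for device in get_devices():
#         print device.name, device.manufacturer
#     joysticks = get_joysticks()
#     if joysticks:
#         joysticks[0].open()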
| regular/pyglet-avbin-optimizations | pyglet/input/darwin_hid.py | Python | bsd-3-clause | 17,379 |
import numpy as np
import regreg.api as rr
from selection.randomized.glm import pairs_bootstrap_glm, bootstrap_cov
from selection.randomized.query import query
from selection.randomized.randomization import split
import functools
def pairs_bootstrap_glm(glm_loss,
active,
beta_full=None,
inactive=None,
scaling=1.,
solve_args={'min_its':50, 'tol':1.e-10}):
"""
pairs bootstrap of (beta_hat_active, -grad_inactive(beta_hat_active))
"""
X, Y = glm_loss.data
if beta_full is None:
beta_active = restricted_Mest(glm_loss, active, solve_args=solve_args)
beta_full = np.zeros(glm_loss.shape)
beta_full[active] = beta_active
else:
beta_active = beta_full[active]
X_active = X[:,active]
nactive = active.sum()
ntotal = nactive
if inactive is not None:
X_inactive = X[:,inactive]
ntotal += inactive.sum()
_bootW = np.diag(glm_loss.saturated_loss.hessian(X_active.dot(beta_active)))
_bootQ = X_active.T.dot(_bootW.dot(X_active))
_bootQinv = np.linalg.inv(_bootQ)
if inactive is not None:
_bootC = X_inactive.T.dot(_bootW.dot(X_active))
_bootI = _bootC.dot(_bootQinv)
else:
_bootI = None
nactive = active.sum()
if inactive is not None:
X_full = np.hstack([X_active,X_inactive])
beta_overall = np.zeros(X_full.shape[1])
beta_overall[:nactive] = beta_active
else:
X_full = X_active
beta_overall = beta_active
_boot_mu = lambda X_full, beta_overall: glm_loss.saturated_loss.mean_function(X_full.dot(beta_overall))
if ntotal > nactive:
observed = np.hstack([beta_active, -glm_loss.smooth_objective(beta_full, 'grad')[inactive]])
else:
observed = beta_active
# scaling is a lipschitz constant for a gradient squared
_sqrt_scaling = np.sqrt(scaling)
def _boot_score(X_full, Y, ntotal, _bootQinv, _bootI, nactive, _sqrt_scaling, beta_overall, indices):
X_star = X_full[indices]
Y_star = Y[indices]
score = X_star.T.dot(Y_star - _boot_mu(X_star, beta_overall))
result = np.zeros(ntotal)
result[:nactive] = _bootQinv.dot(score[:nactive])
if ntotal > nactive:
result[nactive:] = score[nactive:] - _bootI.dot(score[:nactive])
result[:nactive] *= _sqrt_scaling
result[nactive:] /= _sqrt_scaling
return result
observed[:nactive] *= _sqrt_scaling
observed[nactive:] /= _sqrt_scaling
return functools.partial(_boot_score, X_full, Y, ntotal, _bootQinv, _bootI, nactive, _sqrt_scaling, beta_overall), observed
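# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# The names `loss` and `active` below are hypothetical stand-ins for a fitted regreg
# GLM loss and a boolean active set produced by an earlier selection step; this only
# shows how the pair returned by pairs_bootstrap_glm is meant to be used.
def _example_pairs_bootstrap_usage(loss, active):
    """Hedged sketch: draw one pairs-bootstrap replicate of the (active, inactive) score."""
    boot_score, observed = pairs_bootstrap_glm(loss, active, inactive=~active)
    n = loss.data[1].shape[0]                            # number of observations (Y is loss.data[1])
    indices = np.random.choice(n, size=n, replace=True)  # one bootstrap resample of the pairs
    return boot_score(indices), observed                 # replicate has the same shape as `observed`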
def pairs_bootstrap_score(glm_loss,
active,
beta_active=None,
solve_args={'min_its':50, 'tol':1.e-10}):
"""
pairs bootstrap of (beta_hat_active, -grad_inactive(beta_hat_active))
"""
X, Y = glm_loss.data
if beta_active is None:
beta_active = restricted_Mest(glm_loss, active, solve_args=solve_args)
X_active = X[:,active]
_bootW = np.diag(glm_loss.saturated_loss.hessian(X_active.dot(beta_active)))
_boot_mu = lambda X_active, beta_active: glm_loss.saturated_loss.mean_function(X_active.dot(beta_active))
def _boot_score(X, Y, active, beta_active, indices):
X_star = X[indices]
Y_star = Y[indices]
score = -X_star.T.dot(Y_star - _boot_mu(X_star[:,active], beta_active))
return score
return functools.partial(_boot_score, X, Y, active, beta_active)
def set_alpha_matrix(glm_loss,
active,
beta_full=None,
inactive=None,
scaling=1.,
solve_args={'min_its': 50, 'tol': 1.e-10}):
X, Y = glm_loss.data
if beta_full is None:
beta_active = restricted_Mest(glm_loss, active, solve_args=solve_args)
beta_full = np.zeros(glm_loss.shape)
beta_full[active] = beta_active
else:
beta_active = beta_full[active]
X_active = X[:,active]
nactive = active.sum()
ntotal = nactive
if inactive is not None:
X_inactive = X[:,inactive]
ntotal += inactive.sum()
_W = np.diag(glm_loss.saturated_loss.hessian(X_active.dot(beta_active)))
_Q = X_active.T.dot(_W.dot(X_active))
_Qinv = np.linalg.inv(_Q)
nactive = active.sum()
if inactive is not None:
X_full = np.hstack([X_active, X_inactive])
beta_overall = np.zeros(X_full.shape[1])
beta_overall[:nactive] = beta_active
else:
X_full = X_active
beta_overall = beta_active
obs_residuals = Y - glm_loss.saturated_loss.mean_function(X_full.dot(beta_overall))
return np.dot(np.dot(_Qinv, X_active.T), np.diag(obs_residuals))
class M_estimator(query):
def __init__(self, loss, epsilon, penalty, randomization, solve_args={'min_its':50, 'tol':1.e-10}):
"""
Fits the logistic regression to a candidate active set, without penalty.
Calls the method bootstrap_covariance() to bootstrap the covariance matrix.
Computes $\bar{\beta}_E$ which is the restricted
M-estimator (i.e. subject to the constraint $\beta_{-E}=0$).
Parameters:
-----------
active: np.bool
The active set from fitting the logistic lasso
solve_args: dict
Arguments to be passed to regreg solver.
Returns:
--------
None
Notes:
------
Sets self._beta_unpenalized which will be used in the covariance matrix calculation.
Also computes Hessian of loss at restricted M-estimator as well as the bootstrap covariance.
"""
query.__init__(self, randomization)
(self.loss,
self.epsilon,
self.penalty,
self.randomization,
self.solve_args) = (loss,
epsilon,
penalty,
randomization,
solve_args)
# Methods needed for subclassing a query
def solve(self, scaling=1, solve_args={'min_its':20, 'tol':1.e-10}):
self.randomize()
(loss,
randomized_loss,
epsilon,
penalty,
randomization,
solve_args) = (self.loss,
self.randomized_loss,
self.epsilon,
self.penalty,
self.randomization,
self.solve_args)
# initial solution
problem = rr.simple_problem(randomized_loss, penalty)
self.initial_soln = problem.solve(**solve_args)
# find the active groups and their direction vectors
# as well as unpenalized groups
groups = np.unique(penalty.groups)
        active_groups = np.zeros(len(groups), bool)
        unpenalized_groups = np.zeros(len(groups), bool)
        active_directions = []
        active = np.zeros(loss.shape, bool)
        unpenalized = np.zeros(loss.shape, bool)
initial_scalings = []
for i, g in enumerate(groups):
group = penalty.groups == g
active_groups[i] = (np.linalg.norm(self.initial_soln[group]) > 1.e-6 * penalty.weights[g]) and (penalty.weights[g] > 0)
unpenalized_groups[i] = (penalty.weights[g] == 0)
if active_groups[i]:
active[group] = True
                z = np.zeros(active.shape, float)
z[group] = self.initial_soln[group] / np.linalg.norm(self.initial_soln[group])
active_directions.append(z)
initial_scalings.append(np.linalg.norm(self.initial_soln[group]))
if unpenalized_groups[i]:
unpenalized[group] = True
# solve the restricted problem
self._overall = active + unpenalized
self._inactive = ~self._overall
self._unpenalized = unpenalized
self._active_directions = np.array(active_directions).T
        self._active_groups = np.array(active_groups, bool)
        self._unpenalized_groups = np.array(unpenalized_groups, bool)
self.selection_variable = {'groups':self._active_groups,
'variables':self._overall,
'directions':self._active_directions}
# initial state for opt variables
initial_subgrad = -(self.randomized_loss.smooth_objective(self.initial_soln, 'grad') +
self.randomized_loss.quadratic.objective(self.initial_soln, 'grad'))
# the quadratic of a smooth_atom is not included in computing the smooth_objective
initial_subgrad = initial_subgrad[self._inactive]
initial_unpenalized = self.initial_soln[self._unpenalized]
self.observed_opt_state = np.concatenate([initial_scalings,
initial_unpenalized,
initial_subgrad], axis=0)
# set the _solved bit
self._solved = True
# Now setup the pieces for linear decomposition
(loss,
epsilon,
penalty,
initial_soln,
overall,
inactive,
unpenalized,
active_groups,
active_directions) = (self.loss,
self.epsilon,
self.penalty,
self.initial_soln,
self._overall,
self._inactive,
self._unpenalized,
self._active_groups,
self._active_directions)
# scaling should be chosen to be Lipschitz constant for gradient of Gaussian part
# we are implicitly assuming that
# loss is a pairs model
_sqrt_scaling = np.sqrt(scaling)
_beta_unpenalized = restricted_Mest(loss, overall, solve_args=solve_args)
beta_full = np.zeros(overall.shape)
beta_full[overall] = _beta_unpenalized
_hessian = loss.hessian(beta_full)
self._beta_full = beta_full
# observed state for score
self.observed_score_state = np.hstack([_beta_unpenalized * _sqrt_scaling,
-loss.smooth_objective(beta_full, 'grad')[inactive] / _sqrt_scaling])
# form linear part
self.num_opt_var = p = loss.shape[0] # shorthand for p
# (\bar{\beta}_{E \cup U}, N_{-E}, c_E, \beta_U, z_{-E})
# E for active
# U for unpenalized
# -E for inactive
_opt_linear_term = np.zeros((p, self._active_groups.sum() + unpenalized.sum() + inactive.sum()))
_score_linear_term = np.zeros((p, p))
# \bar{\beta}_{E \cup U} piece -- the unpenalized M estimator
Mest_slice = slice(0, overall.sum())
_Mest_hessian = _hessian[:,overall]
_score_linear_term[:,Mest_slice] = -_Mest_hessian / _sqrt_scaling
# N_{-(E \cup U)} piece -- inactive coordinates of score of M estimator at unpenalized solution
null_idx = range(overall.sum(), p)
inactive_idx = np.nonzero(inactive)[0]
for _i, _n in zip(inactive_idx, null_idx):
_score_linear_term[_i,_n] = -_sqrt_scaling
# c_E piece
scaling_slice = slice(0, active_groups.sum())
if len(active_directions)==0:
_opt_hessian=0
else:
_opt_hessian = (_hessian + epsilon * np.identity(p)).dot(active_directions)
_opt_linear_term[:,scaling_slice] = _opt_hessian / _sqrt_scaling
self.observed_opt_state[scaling_slice] *= _sqrt_scaling
# beta_U piece
unpenalized_slice = slice(active_groups.sum(), active_groups.sum() + unpenalized.sum())
unpenalized_directions = np.identity(p)[:,unpenalized]
if unpenalized.sum():
_opt_linear_term[:,unpenalized_slice] = (_hessian + epsilon * np.identity(p)).dot(unpenalized_directions) / _sqrt_scaling
self.observed_opt_state[unpenalized_slice] *= _sqrt_scaling
# subgrad piece
subgrad_idx = range(active_groups.sum() + unpenalized.sum(), active_groups.sum() + inactive.sum() + unpenalized.sum())
subgrad_slice = slice(active_groups.sum() + unpenalized.sum(), active_groups.sum() + inactive.sum() + unpenalized.sum())
for _i, _s in zip(inactive_idx, subgrad_idx):
_opt_linear_term[_i,_s] = _sqrt_scaling
self.observed_opt_state[subgrad_slice] /= _sqrt_scaling
# form affine part
_opt_affine_term = np.zeros(p)
idx = 0
groups = np.unique(penalty.groups)
for i, g in enumerate(groups):
if active_groups[i]:
group = penalty.groups == g
_opt_affine_term[group] = active_directions[:,idx][group] * penalty.weights[g]
idx += 1
# two transforms that encode score and optimization
# variable roles
self.opt_transform = (_opt_linear_term, _opt_affine_term)
self.score_transform = (_score_linear_term, np.zeros(_score_linear_term.shape[0]))
# later, we will modify `score_transform`
# in `linear_decomposition`
# now store everything needed for the projections
# the projection acts only on the optimization
# variables
self.scaling_slice = scaling_slice
# weights are scaled here because the linear terms scales them by scaling
new_groups = penalty.groups[inactive]
new_weights = dict([(g, penalty.weights[g] / _sqrt_scaling) for g in penalty.weights.keys() if g in np.unique(new_groups)])
# we form a dual group lasso object
# to do the projection
self.group_lasso_dual = rr.group_lasso_dual(new_groups, weights=new_weights, bound=1.)
self.subgrad_slice = subgrad_slice
self._setup = True
def setup_sampler(self, scaling=1, solve_args={'min_its':20, 'tol':1.e-10}):
pass
def projection(self, opt_state):
"""
Full projection for Langevin.
The state here will be only the state of the optimization variables.
"""
if not self._setup:
raise ValueError('setup_sampler should be called before using this function')
if ('subgradient' not in self.selection_variable and
            'scaling' not in self.selection_variable): # have not conditioned on anything else
new_state = opt_state.copy() # not really necessary to copy
new_state[self.scaling_slice] = np.maximum(opt_state[self.scaling_slice], 0)
new_state[self.subgrad_slice] = self.group_lasso_dual.bound_prox(opt_state[self.subgrad_slice])
elif ('subgradient' not in self.selection_variable and
'scaling' in self.selection_variable): # conditioned on the initial scalings
# only the subgradient in opt_state
new_state = self.group_lasso_dual.bound_prox(opt_state)
elif ('subgradient' in self.selection_variable and
'scaling' not in self.selection_variable): # conditioned on the subgradient
# only the scaling in opt_state
new_state = np.maximum(opt_state, 0)
else:
new_state = opt_state
return new_state
# optional things to condition on
def condition_on_subgradient(self):
"""
Maybe we should allow subgradients of only some variables...
"""
if not self._setup:
raise ValueError('setup_sampler should be called before using this function')
opt_linear, opt_offset = self.opt_transform
new_offset = opt_linear[:,self.subgrad_slice].dot(self.observed_opt_state[self.subgrad_slice]) + opt_offset
new_linear = opt_linear[:,self.scaling_slice]
self.opt_transform = (new_linear, new_offset)
# for group LASSO this should not induce a bigger jacobian as
# the subgradients are in the interior of a ball
self.selection_variable['subgradient'] = self.observed_opt_state[self.subgrad_slice]
# reset variables
self.observed_opt_state = self.observed_opt_state[self.scaling_slice]
self.scaling_slice = slice(None, None, None)
        self.subgrad_slice = np.zeros(new_linear.shape[1], bool)
self.num_opt_var = new_linear.shape[1]
def condition_on_scalings(self):
"""
Maybe we should allow subgradients of only some variables...
"""
if not self._setup:
raise ValueError('setup_sampler should be called before using this function')
opt_linear, opt_offset = self.opt_transform
new_offset = opt_linear[:,self.scaling_slice].dot(self.observed_opt_state[self.scaling_slice]) + opt_offset
new_linear = opt_linear[:,self.subgrad_slice]
self.opt_transform = (new_linear, new_offset)
# for group LASSO this will induce a bigger jacobian
self.selection_variable['scalings'] = self.observed_opt_state[self.scaling_slice]
# reset slices
self.observed_opt_state = self.observed_opt_state[self.subgrad_slice]
self.subgrad_slice = slice(None, None, None)
        self.scaling_slice = np.zeros(new_linear.shape[1], bool)
self.num_opt_var = new_linear.shape[1]
def restricted_Mest(Mest_loss, active, solve_args={'min_its':50, 'tol':1.e-10}):
X, Y = Mest_loss.data
if Mest_loss._is_transform:
raise NotImplementedError('to fit restricted model, X must be an ndarray or scipy.sparse; general transforms not implemented')
X_restricted = X[:,active]
loss_restricted = rr.affine_smooth(Mest_loss.saturated_loss, X_restricted)
beta_E = loss_restricted.solve(**solve_args)
return beta_E
class M_estimator_split(M_estimator):
def __init__(self, loss, epsilon, subsample_size, penalty, solve_args={'min_its':50, 'tol':1.e-10}):
total_size = loss.saturated_loss.shape[0]
self.randomization = split(loss.shape, subsample_size, total_size)
M_estimator.__init__(self,loss, epsilon, penalty, self.randomization, solve_args=solve_args)
total_size = loss.saturated_loss.shape[0]
if subsample_size > total_size:
raise ValueError('subsample size must be smaller than total sample size')
self.total_size, self.subsample_size = total_size, subsample_size
def setup_sampler(self, scaling=1., solve_args={'min_its': 50, 'tol': 1.e-10}, B=2000):
M_estimator.setup_sampler(self,
scaling=scaling,
solve_args=solve_args)
# now we need to estimate covariance of
# loss.grad(\beta_E^*) - 1/pi * randomized_loss.grad(\beta_E^*)
m, n, p = self.subsample_size, self.total_size, self.loss.shape[0] # shorthand
#from .glm import pairs_bootstrap_score
bootstrap_score = pairs_bootstrap_score(self.loss,
self._overall,
beta_active=self._beta_full[self._overall],
solve_args=solve_args)
# find unpenalized MLE on subsample
newq, oldq = rr.identity_quadratic(0, 0, 0, 0), self.randomized_loss.quadratic
self.randomized_loss.quadratic = newq
beta_active_subsample = restricted_Mest(self.randomized_loss,
self._overall)
bootstrap_score_split = pairs_bootstrap_score(self.loss,
self._overall,
beta_active=beta_active_subsample,
solve_args=solve_args)
self.randomized_loss.quadratic = oldq
inv_frac = n / m
def subsample_diff(m, n, indices):
subsample = np.random.choice(indices, size=m, replace=False)
full_score = bootstrap_score(indices) # a sum of n terms
randomized_score = bootstrap_score_split(subsample) # a sum of m terms
return full_score - randomized_score * inv_frac
first_moment = np.zeros(p)
second_moment = np.zeros((p, p))
_n = np.arange(n)
for _ in range(B):
indices = np.random.choice(_n, size=n, replace=True)
randomized_score = subsample_diff(m, n, indices)
first_moment += randomized_score
second_moment += np.multiply.outer(randomized_score, randomized_score)
first_moment /= B
second_moment /= B
cov = second_moment - np.multiply.outer(first_moment,
first_moment)
self.randomization.set_covariance(cov)
return bootstrap_score, cov
class M_estimator_approx(M_estimator):
def __init__(self, loss, epsilon, penalty, randomization, randomizer, estimation):
M_estimator.__init__(self, loss, epsilon, penalty, randomization)
self.randomizer = randomizer
self.estimation = estimation
def solve_approx(self):
self.solve()
(_opt_linear_term, _opt_affine_term) = self.opt_transform
self._opt_linear_term = np.concatenate((_opt_linear_term[self._overall, :], _opt_linear_term[~self._overall, :]), 0)
self._opt_affine_term = np.concatenate((_opt_affine_term[self._overall], _opt_affine_term[~self._overall]), 0)
self.opt_transform = (self._opt_linear_term, self._opt_affine_term)
(_score_linear_term, _) = self.score_transform
self._score_linear_term = np.concatenate((_score_linear_term[self._overall, :], _score_linear_term[~self._overall, :]), 0)
self.score_transform = (self._score_linear_term, np.zeros(self._score_linear_term.shape[0]))
self.feasible_point = np.append(self.observed_score_state, np.abs(self.initial_soln[self._overall]))
lagrange = self.penalty._weight_array
self.inactive_lagrange = lagrange[~self._overall]
X, _ = self.loss.data
n, p = X.shape
self.p = p
nactive = self._overall.sum()
self.nactive = nactive
self.target_observed = self.observed_score_state[:self.nactive]
if self.estimation == 'parametric':
score_cov = np.zeros((p,p))
inv_X_active = np.linalg.inv(X[:, self._overall].T.dot(X[:, self._overall]))
projection_X_active = X[:,self._overall].dot(np.linalg.inv(X[:, self._overall].T.dot(X[:, self._overall]))).dot(X[:,self._overall].T)
score_cov[:self.nactive, :self.nactive] = inv_X_active
score_cov[self.nactive:, self.nactive:] = X[:,~self._overall].T.dot(np.identity(n)- projection_X_active).dot(X[:,~self._overall])
elif self.estimation == 'bootstrap':
bootstrap_score = pairs_bootstrap_glm(self.loss,
self._overall,
beta_full=self._beta_full,
inactive=~self._overall)[0]
score_cov = bootstrap_cov(lambda: np.random.choice(n, size=(n,), replace=True), bootstrap_score)
self.score_cov = score_cov
self.target_cov = score_cov[:nactive, :nactive]
self.score_cov_inv = np.linalg.inv(self.score_cov)
self.B = self._opt_linear_term
self.A = self._score_linear_term
self.B_active = self.B[:nactive, :nactive]
self.B_inactive = self.B[nactive:, :nactive]
self.A_active = self._score_linear_term[:nactive, :]
self.A_inactive = self._score_linear_term[nactive:, :]
self.offset_active = self._opt_affine_term[:nactive]
class M_estimator_approx_carved(M_estimator_split):
def __init__(self, loss, epsilon, subsample_size, penalty, estimation):
M_estimator_split.__init__(self,loss, epsilon, subsample_size, penalty, solve_args={'min_its':50, 'tol':1.e-10})
self.estimation = estimation
def solve_approx(self):
self.solve()
self.nactive = self._overall.sum()
X, _ = self.loss.data
n, p = X.shape
self.p = p
self.target_observed = self.observed_score_state[:self.nactive]
self.feasible_point = np.concatenate([self.observed_score_state, np.fabs(self.observed_opt_state[:self.nactive]),
self.observed_opt_state[self.nactive:]], axis = 0)
(_opt_linear_term, _opt_affine_term) = self.opt_transform
self._opt_linear_term = np.concatenate(
(_opt_linear_term[self._overall, :], _opt_linear_term[~self._overall, :]), 0)
self._opt_affine_term = np.concatenate((_opt_affine_term[self._overall], _opt_affine_term[~self._overall]), 0)
self.opt_transform = (self._opt_linear_term, self._opt_affine_term)
(_score_linear_term, _) = self.score_transform
self._score_linear_term = np.concatenate(
(_score_linear_term[self._overall, :], _score_linear_term[~self._overall, :]), 0)
self.score_transform = (self._score_linear_term, np.zeros(self._score_linear_term.shape[0]))
lagrange = self.penalty._weight_array
#print("True or false", np.all(lagrange[0]-np.fabs(self.feasible_point[p+self.nactive:]))>0)
#print("True or false", np.all(self.feasible_point[p:][:self.nactive]) > 0)
self.inactive_lagrange = lagrange[~self._overall]
self.bootstrap_score, self.randomization_cov = self.setup_sampler()
if self.estimation == 'parametric':
score_cov = np.zeros((p,p))
inv_X_active = np.linalg.inv(X[:, self._overall].T.dot(X[:, self._overall]))
projection_X_active = X[:,self._overall].dot(np.linalg.inv(X[:, self._overall].T.dot(X[:, self._overall]))).dot(X[:,self._overall].T)
score_cov[:self.nactive, :self.nactive] = inv_X_active
score_cov[self.nactive:, self.nactive:] = X[:,~self._overall].T.dot(np.identity(n)- projection_X_active).dot(X[:,~self._overall])
elif self.estimation == 'bootstrap':
score_cov = bootstrap_cov(lambda: np.random.choice(n, size=(n,), replace=True), self.bootstrap_score)
self.score_cov = score_cov
self.score_cov_inv = np.linalg.inv(self.score_cov)
class M_estimator_approx_logistic(M_estimator):
def __init__(self, loss, epsilon, penalty, randomization, randomizer, estimation):
M_estimator.__init__(self, loss, epsilon, penalty, randomization)
self.randomizer = randomizer
self.estimation = estimation
def solve_approx(self):
self.solve()
(_opt_linear_term, _opt_affine_term) = self.opt_transform
self._opt_linear_term = np.concatenate((_opt_linear_term[self._overall, :], _opt_linear_term[~self._overall, :]), 0)
self._opt_affine_term = np.concatenate((_opt_affine_term[self._overall], _opt_affine_term[~self._overall]), 0)
self.opt_transform = (self._opt_linear_term, self._opt_affine_term)
(_score_linear_term, _) = self.score_transform
self._score_linear_term = np.concatenate((_score_linear_term[self._overall, :], _score_linear_term[~self._overall, :]), 0)
self.score_transform = (self._score_linear_term, np.zeros(self._score_linear_term.shape[0]))
self.feasible_point = np.append(self.observed_score_state, np.abs(self.initial_soln[self._overall]))
lagrange = self.penalty._weight_array
self.inactive_lagrange = lagrange[~self._overall]
X, _ = self.loss.data
n, p = X.shape
self.p = p
nactive = self._overall.sum()
self.nactive = nactive
self.target_observed = self.observed_score_state[:self.nactive]
if self.estimation == 'parametric':
score_cov = np.zeros((p,p))
vec = np.exp(X[:, self._overall].dot(self.target_observed))
#vec = np.exp(np.zeros(n))
pi = np.true_divide(vec, np.power(1. + vec, 2))
weights = np.diag(pi)
Q_active = X[:, self._overall].T.dot(weights).dot(X[:, self._overall])
Q_active_inv = np.linalg.inv(Q_active)
P_inactive = X[:, ~self._overall].T.dot(np.identity(n)-weights.dot(X[:, self._overall].dot(Q_active_inv)
.dot(X[:, self._overall].T)))
score_cov[:self.nactive, :self.nactive] = Q_active_inv
score_cov[self.nactive:, self.nactive:] = P_inactive.dot(weights).dot(P_inactive.T)
elif self.estimation == 'bootstrap':
bootstrap_score = pairs_bootstrap_glm(self.loss,
self._overall,
beta_full=self._beta_full,
inactive=~self._overall)[0]
score_cov = bootstrap_cov(lambda: np.random.choice(n, size=(n,), replace=True), bootstrap_score)
self.score_cov = score_cov
self.target_cov = score_cov[:nactive, :nactive]
self.score_cov_inv = np.linalg.inv(self.score_cov)
self.B = self._opt_linear_term
self.A = self._score_linear_term
self.B_active = self.B[:nactive, :nactive]
self.B_inactive = self.B[nactive:, :nactive]
self.A_active = self._score_linear_term[:nactive, :]
self.A_inactive = self._score_linear_term[nactive:, :]
self.offset_active = self._opt_affine_term[:nactive]
|
selective-inference/selective-inference
|
selectinf/sandbox/bayesian/estimator.py
|
Python
|
bsd-3-clause
| 29,896
|
"""Compressed Sparse Column matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['csc_matrix', 'isspmatrix_csc']
import numpy as np
from scipy._lib.six import xrange
from ._sparsetools import csc_tocsr
from . import _sparsetools
from .sputils import upcast, isintlike, IndexMixin, get_index_dtype
from .compressed import _cs_matrix
class csc_matrix(_cs_matrix, IndexMixin):
"""
Compressed Sparse Column matrix
This can be instantiated in several ways:
csc_matrix(D)
with a dense matrix or rank-2 ndarray D
csc_matrix(S)
with another sparse matrix S (equivalent to S.tocsc())
csc_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
where ``data``, ``row_ind`` and ``col_ind`` satisfy the
relationship ``a[row_ind[k], col_ind[k]] = data[k]``.
csc_matrix((data, indices, indptr), [shape=(M, N)])
is the standard CSC representation where the row indices for
column i are stored in ``indices[indptr[i]:indptr[i+1]]``
and their corresponding values are stored in
``data[indptr[i]:indptr[i+1]]``. If the shape parameter is
not supplied, the matrix dimensions are inferred from
the index arrays.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
Data array of the matrix
indices
CSC format index array
indptr
CSC format index pointer array
has_sorted_indices
Whether indices are sorted
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the CSC format
- efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
- efficient column slicing
- fast matrix vector products (CSR, BSR may be faster)
Disadvantages of the CSC format
- slow row slicing operations (consider CSR)
- changes to the sparsity structure are expensive (consider LIL or DOK)
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> csc_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 2, 2, 0, 1, 2])
>>> col = np.array([0, 0, 1, 2, 2, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csc_matrix((data, (row, col)), shape=(3, 3)).toarray()
array([[1, 0, 4],
[0, 0, 5],
[2, 3, 6]])
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csc_matrix((data, indices, indptr), shape=(3, 3)).toarray()
array([[1, 0, 4],
[0, 0, 5],
[2, 3, 6]])
"""
format = 'csc'
def transpose(self, copy=False):
from .csr import csr_matrix
M,N = self.shape
return csr_matrix((self.data,self.indices,self.indptr),(N,M),copy=copy)
def __iter__(self):
csr = self.tocsr()
for r in xrange(self.shape[0]):
yield csr[r,:]
def tocsc(self, copy=False):
if copy:
return self.copy()
else:
return self
def tocsr(self):
M,N = self.shape
idx_dtype = get_index_dtype((self.indptr, self.indices),
maxval=max(self.nnz, N))
indptr = np.empty(M + 1, dtype=idx_dtype)
indices = np.empty(self.nnz, dtype=idx_dtype)
data = np.empty(self.nnz, dtype=upcast(self.dtype))
csc_tocsr(M, N,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
self.data,
indptr,
indices,
data)
from .csr import csr_matrix
A = csr_matrix((data, indices, indptr), shape=self.shape)
A.has_sorted_indices = True
return A
def __getitem__(self, key):
# Use CSR to implement fancy indexing.
row, col = self._unpack_index(key)
        # Things that return submatrices. row or col is an int or slice.
if (isinstance(row, slice) or isinstance(col, slice) or
isintlike(row) or isintlike(col)):
return self.T[col, row].T
# Things that return a sequence of values.
else:
return self.T[col, row]
def nonzero(self):
# CSC can't use _cs_matrix's .nonzero method because it
# returns the indices sorted for self transposed.
# Get row and col indices, from _cs_matrix.tocoo
major_dim, minor_dim = self._swap(self.shape)
minor_indices = self.indices
major_indices = np.empty(len(minor_indices), dtype=self.indptr.dtype)
_sparsetools.expandptr(major_dim, self.indptr, major_indices)
row, col = self._swap((major_indices, minor_indices))
# Sort them to be in C-style order
ind = np.lexsort((col, row))
row = row[ind]
col = col[ind]
return row, col
nonzero.__doc__ = _cs_matrix.nonzero.__doc__
def getrow(self, i):
"""Returns a copy of row i of the matrix, as a (1 x n)
CSR matrix (row vector).
"""
# we convert to CSR to maintain compatibility with old impl.
# in spmatrix.getrow()
return self._get_submatrix(i, slice(None)).tocsr()
def getcol(self, i):
"""Returns a copy of column i of the matrix, as a (m x 1)
CSC matrix (column vector).
"""
return self._get_submatrix(slice(None), i)
# these functions are used by the parent class (_cs_matrix)
    # to remove redundancy between csc_matrix and csr_matrix
def _swap(self,x):
"""swap the members of x if this is a column-oriented matrix
"""
return (x[1],x[0])
def isspmatrix_csc(x):
return isinstance(x, csc_matrix)
|
Shaswat27/scipy
|
scipy/sparse/csc.py
|
Python
|
bsd-3-clause
| 6,349
|
"""Implementation of __array_function__ overrides from NEP-18."""
import collections
import functools
import os
import textwrap
from numpy.core._multiarray_umath import (
add_docstring, implement_array_function, _get_implementing_args)
from numpy.compat._inspect import getargspec
ARRAY_FUNCTION_ENABLED = bool(
int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 1)))
array_function_like_doc = (
"""like : array_like
Reference object to allow the creation of arrays which are not
NumPy arrays. If an array-like passed in as ``like`` supports
the ``__array_function__`` protocol, the result will be defined
by it. In this case, it ensures the creation of an array object
compatible with that passed in via this argument.
.. note::
The ``like`` keyword is an experimental feature pending on
acceptance of :ref:`NEP 35 <NEP35>`."""
)
def set_array_function_like_doc(public_api):
if public_api.__doc__ is not None:
public_api.__doc__ = public_api.__doc__.replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
)
return public_api
add_docstring(
implement_array_function,
"""
Implement a function with checks for __array_function__ overrides.
All arguments are required, and can only be passed by position.
Parameters
----------
implementation : function
Function that implements the operation on NumPy array without
overrides when called like ``implementation(*args, **kwargs)``.
public_api : function
Function exposed by NumPy's public API originally called like
``public_api(*args, **kwargs)`` on which arguments are now being
checked.
relevant_args : iterable
Iterable of arguments to check for __array_function__ methods.
args : tuple
Arbitrary positional arguments originally passed into ``public_api``.
kwargs : dict
Arbitrary keyword arguments originally passed into ``public_api``.
Returns
-------
Result from calling ``implementation()`` or an ``__array_function__``
method, as appropriate.
Raises
------
TypeError : if no implementation is found.
""")
# exposed for testing purposes; used internally by implement_array_function
add_docstring(
_get_implementing_args,
"""
Collect arguments on which to call __array_function__.
Parameters
----------
relevant_args : iterable of array-like
Iterable of possibly array-like arguments to check for
__array_function__ methods.
Returns
-------
Sequence of arguments with __array_function__ methods, in the order in
which they should be called.
""")
ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')
def verify_matching_signatures(implementation, dispatcher):
"""Verify that a dispatcher function has the right signature."""
implementation_spec = ArgSpec(*getargspec(implementation))
dispatcher_spec = ArgSpec(*getargspec(dispatcher))
if (implementation_spec.args != dispatcher_spec.args or
implementation_spec.varargs != dispatcher_spec.varargs or
implementation_spec.keywords != dispatcher_spec.keywords or
(bool(implementation_spec.defaults) !=
bool(dispatcher_spec.defaults)) or
(implementation_spec.defaults is not None and
len(implementation_spec.defaults) !=
len(dispatcher_spec.defaults))):
raise RuntimeError('implementation and dispatcher for %s have '
'different function signatures' % implementation)
if implementation_spec.defaults is not None:
if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults):
raise RuntimeError('dispatcher functions can only use None for '
'default argument values')
def set_module(module):
"""Decorator for overriding __module__ on a function or class.
Example usage::
@set_module('numpy')
def example():
pass
assert example.__module__ == 'numpy'
"""
def decorator(func):
if module is not None:
func.__module__ = module
return func
return decorator
# Call textwrap.dedent here instead of in the function so as to avoid
# calling dedent multiple times on the same text
_wrapped_func_source = textwrap.dedent("""
@functools.wraps(implementation)
def {name}(*args, **kwargs):
relevant_args = dispatcher(*args, **kwargs)
return implement_array_function(
implementation, {name}, relevant_args, args, kwargs)
""")
def array_function_dispatch(dispatcher, module=None, verify=True,
docs_from_dispatcher=False):
"""Decorator for adding dispatch with the __array_function__ protocol.
See NEP-18 for example usage.
Parameters
----------
dispatcher : callable
Function that when called like ``dispatcher(*args, **kwargs)`` with
arguments from the NumPy function call returns an iterable of
array-like arguments to check for ``__array_function__``.
module : str, optional
__module__ attribute to set on new function, e.g., ``module='numpy'``.
By default, module is copied from the decorated function.
verify : bool, optional
        If True, verify that the signatures of the dispatcher and decorated
        function match exactly: all required and optional arguments
should appear in order with the same names, but the default values for
all optional arguments should be ``None``. Only disable verification
if the dispatcher's signature needs to deviate for some particular
reason, e.g., because the function has a signature like
``func(*args, **kwargs)``.
docs_from_dispatcher : bool, optional
If True, copy docs from the dispatcher function onto the dispatched
function, rather than from the implementation. This is useful for
functions defined in C, which otherwise don't have docstrings.
Returns
-------
Function suitable for decorating the implementation of a NumPy function.
"""
if not ARRAY_FUNCTION_ENABLED:
def decorator(implementation):
if docs_from_dispatcher:
add_docstring(implementation, dispatcher.__doc__)
if module is not None:
implementation.__module__ = module
return implementation
return decorator
def decorator(implementation):
if verify:
verify_matching_signatures(implementation, dispatcher)
if docs_from_dispatcher:
add_docstring(implementation, dispatcher.__doc__)
# Equivalently, we could define this function directly instead of using
# exec. This version has the advantage of giving the helper function a
        # more interpretable name. Otherwise, the original function does not
# show up at all in many cases, e.g., if it's written in C or if the
# dispatcher gets an invalid keyword argument.
source = _wrapped_func_source.format(name=implementation.__name__)
source_object = compile(
source, filename='<__array_function__ internals>', mode='exec')
scope = {
'implementation': implementation,
'dispatcher': dispatcher,
'functools': functools,
'implement_array_function': implement_array_function,
}
exec(source_object, scope)
public_api = scope[implementation.__name__]
if module is not None:
public_api.__module__ = module
public_api._implementation = implementation
return public_api
return decorator
def array_function_from_dispatcher(
implementation, module=None, verify=True, docs_from_dispatcher=True):
"""Like array_function_dispatcher, but with function arguments flipped."""
def decorator(dispatcher):
return array_function_dispatch(
dispatcher, module, verify=verify,
docs_from_dispatcher=docs_from_dispatcher)(implementation)
return decorator
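# --- Illustrative usage sketch (editor's addition, not part of numpy.core.overrides) ---
# A hedged example of how a NumPy-style function is typically wired up with
# array_function_dispatch under NEP-18, and how an array-like can intercept it.
# `my_stack` and `_DuckArray` are hypothetical names used only for illustration.
def _my_stack_dispatcher(arrays, axis=None):
    # The dispatcher returns only the arguments that may carry __array_function__.
    return arrays
@array_function_dispatch(_my_stack_dispatcher)
def my_stack(arrays, axis=0):
    """Hypothetical public API; array-likes can override it via __array_function__."""
    # A real implementation would build an ndarray; this sketch stays trivial on purpose.
    return list(arrays)
class _DuckArray:
    """Hypothetical array-like that intercepts my_stack through the NEP-18 protocol."""
    def __array_function__(self, func, types, args, kwargs):
        if func is my_stack:
            return 'handled by _DuckArray'
        return NotImplemented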
|
grlee77/numpy
|
numpy/core/overrides.py
|
Python
|
bsd-3-clause
| 8,273
|
import numpy as np
from bokeh.models import ColumnDataSource, Plot, LinearAxis, Grid
from bokeh.models.markers import Diamond
from bokeh.io import curdoc, show
N = 9
x = np.linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)
source = ColumnDataSource(dict(x=x, y=y, sizes=sizes))
plot = Plot(
title=None, plot_width=300, plot_height=300,
h_symmetry=False, v_symmetry=False, min_border=0, toolbar_location=None)
glyph = Diamond(x="x", y="y", size="sizes", line_color="#1c9099", line_width=2, fill_color=None)
plot.add_glyph(source, glyph)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
curdoc().add_root(plot)
show(plot)
|
mindriot101/bokeh
|
examples/reference/models/Diamond.py
|
Python
|
bsd-3-clause
| 815
|
def reference_case_attachment_view(request, domain, case_id, attachment_id):
    raise NotImplementedError("This view is to be overridden by the specific implementations for retrieving case attachments")
|
puttarajubr/commcare-hq
|
corehq/ex-submodules/casexml/apps/case/views.py
|
Python
|
bsd-3-clause
| 201
|
import warnings
from django.views.generic.dates import ArchiveIndexView, DateDetailView, DayArchiveView, MonthArchiveView, \
YearArchiveView
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.views.generic.base import RedirectView
from django.core.urlresolvers import reverse
from .models import Photo, Gallery
# Gallery views.
class GalleryListView(ListView):
queryset = Gallery.objects.on_site().is_public()
paginate_by = 20
class GalleryDetailView(DetailView):
queryset = Gallery.objects.on_site().is_public()
class GalleryDateView(object):
queryset = Gallery.objects.on_site().is_public()
date_field = 'date_added'
allow_empty = True
class GalleryDateDetailView(GalleryDateView, DateDetailView):
pass
class GalleryArchiveIndexView(GalleryDateView, ArchiveIndexView):
pass
class GalleryDayArchiveView(GalleryDateView, DayArchiveView):
pass
class GalleryMonthArchiveView(GalleryDateView, MonthArchiveView):
pass
class GalleryYearArchiveView(GalleryDateView, YearArchiveView):
make_object_list = True
# Photo views.
class PhotoListView(ListView):
queryset = Photo.objects.on_site().is_public()
paginate_by = 20
class PhotoDetailView(DetailView):
queryset = Photo.objects.on_site().is_public()
class PhotoDateView(object):
queryset = Photo.objects.on_site().is_public()
date_field = 'date_added'
allow_empty = True
class PhotoDateDetailView(PhotoDateView, DateDetailView):
pass
class PhotoArchiveIndexView(PhotoDateView, ArchiveIndexView):
pass
class PhotoDayArchiveView(PhotoDateView, DayArchiveView):
pass
class PhotoMonthArchiveView(PhotoDateView, MonthArchiveView):
pass
class PhotoYearArchiveView(PhotoDateView, YearArchiveView):
make_object_list = True
# Deprecated views.
class DeprecatedMonthMixin(object):
    """Representation of months in urls has changed from an alpha representation ('jan' for January)
    to a numeric representation ('01' for January).
    Properly deprecate the previous urls."""
query_string = True
month_names = {'jan': '01',
'feb': '02',
'mar': '03',
'apr': '04',
'may': '05',
'jun': '06',
'jul': '07',
'aug': '08',
'sep': '09',
'oct': '10',
'nov': '11',
'dec': '12', }
def get_redirect_url(self, *args, **kwargs):
warnings.warn(
DeprecationWarning('Months are now represented in urls by numbers rather than by '
'their first 3 letters. The old style will be removed in Photologue 3.4.'))
class GalleryDateDetailOldView(DeprecatedMonthMixin, RedirectView):
permanent = True
def get_redirect_url(self, *args, **kwargs):
super(GalleryDateDetailOldView, self).get_redirect_url(*args, **kwargs)
return reverse('photologue:gallery-detail', kwargs={'year': kwargs['year'],
'month': self.month_names[kwargs['month']],
'day': kwargs['day'],
'slug': kwargs['slug']})
class GalleryDayArchiveOldView(DeprecatedMonthMixin, RedirectView):
permanent = True
def get_redirect_url(self, *args, **kwargs):
super(GalleryDayArchiveOldView, self).get_redirect_url(*args, **kwargs)
return reverse('photologue:gallery-archive-day', kwargs={'year': kwargs['year'],
'month': self.month_names[kwargs['month']],
'day': kwargs['day']})
class GalleryMonthArchiveOldView(DeprecatedMonthMixin, RedirectView):
permanent = True
def get_redirect_url(self, *args, **kwargs):
super(GalleryMonthArchiveOldView, self).get_redirect_url(*args, **kwargs)
return reverse('photologue:gallery-archive-month', kwargs={'year': kwargs['year'],
'month': self.month_names[kwargs['month']]})
class PhotoDateDetailOldView(DeprecatedMonthMixin, RedirectView):
permanent = True
def get_redirect_url(self, *args, **kwargs):
super(PhotoDateDetailOldView, self).get_redirect_url(*args, **kwargs)
return reverse('photologue:photo-detail', kwargs={'year': kwargs['year'],
'month': self.month_names[kwargs['month']],
'day': kwargs['day'],
'slug': kwargs['slug']})
class PhotoDayArchiveOldView(DeprecatedMonthMixin, RedirectView):
permanent = True
def get_redirect_url(self, *args, **kwargs):
super(PhotoDayArchiveOldView, self).get_redirect_url(*args, **kwargs)
return reverse('photologue:photo-archive-day', kwargs={'year': kwargs['year'],
'month': self.month_names[kwargs['month']],
'day': kwargs['day']})
class PhotoMonthArchiveOldView(DeprecatedMonthMixin, RedirectView):
permanent = True
def get_redirect_url(self, *args, **kwargs):
super(PhotoMonthArchiveOldView, self).get_redirect_url(*args, **kwargs)
return reverse('photologue:photo-archive-month', kwargs={'year': kwargs['year'],
'month': self.month_names[kwargs['month']]})
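# --- Illustrative URL wiring sketch (editor's addition, not part of photologue.views) ---
# A hedged example of how the deprecated alpha-month redirect views above might be routed
# alongside the new numeric-month archive views. The regexes and URL names below are
# assumptions for illustration only; the project's real urls.py may differ.
#
# from django.conf.urls import url
# urlpatterns = [
#     # old style (/gallery/2015/jan/) redirects via GalleryMonthArchiveOldView
#     url(r'^gallery/(?P<year>\d{4})/(?P<month>[a-z]{3})/$',
#         GalleryMonthArchiveOldView.as_view(), name='gallery-archive-month-old'),
#     # new style (/gallery/2015/01/) is served by GalleryMonthArchiveView
#     url(r'^gallery/(?P<year>\d{4})/(?P<month>\d{2})/$',
#         GalleryMonthArchiveView.as_view(), name='gallery-archive-month'),
# ]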
|
rmaceissoft/django-photologue
|
photologue/views.py
|
Python
|
bsd-3-clause
| 5,802
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script runs swarming_xcode_install on the bots. It should be run when we
need to upgrade all the swarming testers. It:
1) Packages two python files into an isolate.
2) Runs the isolate on swarming machines that satisfy certain dimensions.
Example usage:
$ ./build/run_swarming_xcode_install.py --luci_path ~/work/luci-py \
--swarming-server touch-swarming.appspot.com \
--isolate-server touch-isolate.appspot.com
"""
from __future__ import print_function
import argparse
import os
import shutil
import subprocess
import sys
import tempfile
def main():
parser = argparse.ArgumentParser(
description='Run swarming_xcode_install on the bots.')
parser.add_argument('--luci_path', required=True, type=os.path.abspath)
parser.add_argument('--swarming-server', required=True, type=str)
parser.add_argument('--isolate-server', required=True, type=str)
parser.add_argument('--batches', type=int, default=25,
help="Run xcode install in batches of size |batches|.")
parser.add_argument('--dimension', nargs=2, action='append')
args = parser.parse_args()
args.dimension = args.dimension or []
script_dir = os.path.dirname(os.path.abspath(__file__))
tmp_dir = tempfile.mkdtemp(prefix='swarming_xcode')
try:
print('Making isolate.')
shutil.copyfile(os.path.join(script_dir, 'swarming_xcode_install.py'),
os.path.join(tmp_dir, 'swarming_xcode_install.py'))
shutil.copyfile(os.path.join(script_dir, 'mac_toolchain.py'),
os.path.join(tmp_dir, 'mac_toolchain.py'))
luci_client = os.path.join(args.luci_path, 'client')
cmd = [
sys.executable, os.path.join(luci_client, 'isolateserver.py'), 'archive',
'-I', args.isolate_server, tmp_dir,
]
isolate_hash = subprocess.check_output(cmd).split()[0]
print('Running swarming_xcode_install.')
# TODO(crbug.com/765361): The dimensions below should be updated once
# swarming for iOS is fleshed out, likely removing xcode_version 9 and
# adding different dimensions.
luci_tools = os.path.join(luci_client, 'tools')
dimensions = [['pool', 'Chrome'], ['xcode_version', '9.0']] + args.dimension
dim_args = []
for d in dimensions:
dim_args += ['--dimension'] + d
cmd = [
sys.executable, os.path.join(luci_tools, 'run_on_bots.py'),
'--swarming', args.swarming_server, '--isolate-server',
args.isolate_server, '--priority', '20', '--batches', str(args.batches),
'--tags', 'name:run_swarming_xcode_install',
] + dim_args + ['--name', 'run_swarming_xcode_install', '--', isolate_hash,
'python', 'swarming_xcode_install.py',
]
subprocess.check_call(cmd)
print('All tasks completed.')
finally:
shutil.rmtree(tmp_dir)
return 0
if __name__ == '__main__':
sys.exit(main())
|
endlessm/chromium-browser
|
build/run_swarming_xcode_install.py
|
Python
|
bsd-3-clause
| 3,039
|
Experiment(description='Synthetic data sets of interest',
data_dir='../data/synth/',
max_depth=9,
random_order=False,
k=1,
debug=False,
local_computation=False,
n_rand=9,
sd=2,
jitter_sd=0.1,
max_jobs=300,
verbose=False,
make_predictions=False,
skip_complete=False,
results_dir='../results/synth/',
iters=250,
base_kernels='SE,Per,Lin,Const,Noise',
random_seed=1,
period_heuristic=5,
period_heuristic_type='min',
subset=True,
subset_size=250,
full_iters=10,
bundle_size=2,
additive_form=True,
mean='ff.MeanZero()', # Starting mean
kernel='ff.NoiseKernel()', # Starting kernel
lik='ff.LikGauss(sf=-np.Inf)', # Starting likelihood
score='pl2',
search_operators=[('A', ('+', 'A', 'B'), {'A': 'kernel', 'B': 'base'}),
('A', ('*', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
('A', ('*-const', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
('A', 'B', {'A': 'kernel', 'B': 'base'}),
('A', ('CP', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('CW', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('B', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('BL', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('None',), {'A': 'kernel'})])
|
ekamioka/gpss-research
|
experiments/synth/synth.py
|
Python
|
mit
| 1,731
|
"""events
Revision ID: 9c92c85163a9
Revises: 666668eae682
Create Date: 2016-05-09 19:04:44.498817
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '9c92c85163a9'
down_revision = '666668eae682'
def upgrade():
op.create_table('event',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('origin', sa.Unicode(), nullable=True),
sa.Column('data', postgresql.JSONB(), nullable=True),
sa.Column('created_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.drop_table('processing_log')
def downgrade():
op.create_table('processing_log',
sa.Column('created_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.Column('id', sa.BIGINT(), nullable=False),
sa.Column('operation', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column('component', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column('source_location', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column('content_hash', sa.VARCHAR(length=65), autoincrement=False, nullable=True),
sa.Column('foreign_id', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column('source_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('document_id', sa.BIGINT(), autoincrement=False, nullable=True),
sa.Column('meta', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.Column('error_type', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column('error_message', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column('error_details', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name=u'processing_log_pkey')
)
op.drop_table('event')
|
gazeti/aleph
|
aleph/migrate/versions/9c92c85163a9_events.py
|
Python
|
mit
| 1,959
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import euclidean_distances
from .datasets import make_wave
from .plot_helpers import cm3
def plot_knn_regression(n_neighbors=1):
X, y = make_wave(n_samples=40)
X_test = np.array([[-1.5], [0.9], [1.5]])
dist = euclidean_distances(X, X_test)
closest = np.argsort(dist, axis=0)
plt.figure(figsize=(10, 6))
reg = KNeighborsRegressor(n_neighbors=n_neighbors).fit(X, y)
y_pred = reg.predict(X_test)
for x, y_, neighbors in zip(X_test, y_pred, closest.T):
for neighbor in neighbors[:n_neighbors]:
plt.arrow(x[0], y_, X[neighbor, 0] - x[0], y[neighbor] - y_,
head_width=0, fc='k', ec='k')
train, = plt.plot(X, y, 'o', c=cm3(0))
test, = plt.plot(X_test, -3 * np.ones(len(X_test)), '*', c=cm3(2),
markersize=20)
pred, = plt.plot(X_test, y_pred, '*', c=cm3(0), markersize=20)
plt.vlines(X_test, -3.1, 3.1, linestyle="--")
plt.legend([train, test, pred],
["training data/target", "test data", "test prediction"],
ncol=3, loc=(.1, 1.025))
plt.ylim(-3.1, 3.1)
plt.xlabel("Feature")
plt.ylabel("Target")
|
bgroveben/python3_machine_learning_projects
|
oreilly_GANs_for_beginners/oreilly_GANs_for_beginners/introduction_to_ml_with_python/mglearn/mglearn/plot_knn_regression.py
|
Python
|
mit
| 1,285
|
__author__ = 'Anatoli Kalysch'
|
anatolikalysch/VMAttack
|
ui/__init__.py
|
Python
|
mit
| 31
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pypinyin.phrases_dict import phrases_dict
from pypinyin import pinyin, TONE
phrases_list = set()
phrases_same = set()
for han, pys in phrases_dict.items():
if pinyin(han, style=TONE, heteronym=True) != pys:
phrases_list.add(han)
else:
phrases_same.add(han)
if __name__ == '__main__':
with open('phrases_same.txt', 'w') as f:
for x in phrases_same:
f.write(u'%s\n' % x)
print(len(phrases_dict))
print(len(phrases_same))
print(len(phrases_list))
|
MingStar/python-pinyin
|
tools/small_phrases.py
|
Python
|
mit
| 562
|
"""
Misago default settings
This file sets everything Misago needs to run.
If you want to add a custom app, middleware or path, please update the setting value
defined in this file instead of copying the setting from here to your settings.py.
Yes:
#yourproject/settings.py
INSTALLED_APPS += (
'myawesomeapp',
)
No:
#yourproject/settings.py
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'misago.core',
'misago.conf',
...
'myawesomeapp',
)
"""
# Build paths inside the project like this: os.path.join(MISAGO_BASE_DIR, ...)
import os
MISAGO_BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Default JS debug to false
# This setting is used exclusively by the test runner and isn't part of the public API
_MISAGO_JS_DEBUG = False
# Assets Pipeline
# See http://django-pipeline.readthedocs.org/en/latest/configuration.html
PIPELINE_CSS = {
'misago_admin': {
'source_filenames': (
'misago/admin/css/style.less',
),
'output_filename': 'misago_admin.css',
},
}
PIPELINE_JS = {
'misago_admin': {
'source_filenames': (
'misago/admin/js/jquery.js',
'misago/admin/js/bootstrap.js',
'misago/admin/js/moment.min.js',
'misago/admin/js/bootstrap-datetimepicker.min.js',
'misago/admin/js/misago-datetimepicker.js',
'misago/admin/js/misago-timestamps.js',
'misago/admin/js/misago-tooltips.js',
'misago/admin/js/misago-tables.js',
'misago/admin/js/misago-yesnoswitch.js',
),
'output_filename': 'misago_admin.js',
},
}
PIPELINE_COMPILERS = (
'pipeline.compilers.less.LessCompiler',
)
PIPELINE_TEMPLATE_EXT = '.hbs'
PIPELINE_TEMPLATE_FUNC = 'Ember.Handlebars.compile'
PIPELINE_TEMPLATE_NAMESPACE = 'window.Ember.TEMPLATES'
PIPELINE_TEMPLATE_SEPARATOR = '/'
PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
# Keep misago.users above django.contrib.auth
# so our management commands take precedence over theirs
'misago.users',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'debug_toolbar',
'pipeline',
'crispy_forms',
'mptt',
'rest_framework',
'misago.admin',
'misago.acl',
'misago.core',
'misago.conf',
'misago.markup',
'misago.notifications',
'misago.legal',
'misago.forums',
'misago.threads',
'misago.readtracker',
'misago.faker',
)
MIDDLEWARE_CLASSES = (
'misago.core.middleware.embercliredirects.EmberCLIRedirectsMiddleware',
'misago.users.middleware.AvatarServerMiddleware',
'misago.users.middleware.RealIPMiddleware',
'misago.core.middleware.frontendcontext.FrontendContextMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'misago.users.middleware.UserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'misago.core.middleware.exceptionhandler.ExceptionHandlerMiddleware',
'misago.users.middleware.OnlineTrackerMiddleware',
'misago.admin.middleware.AdminAuthMiddleware',
'misago.threads.middleware.UnreadThreadsCountMiddleware',
'misago.core.middleware.threadstore.ThreadStoreMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'misago.core.context_processors.site_address',
'misago.conf.context_processors.settings',
'misago.users.context_processors.sites_links',
# Data preloaders
'misago.conf.context_processors.preload_settings_json',
'misago.users.context_processors.preload_user_json',
# Note: keep frontend_context processor last for previous processors
# to be able to add data to request.frontend_context
'misago.core.context_processors.frontend_context',
)
MISAGO_ACL_EXTENSIONS = (
'misago.users.permissions.account',
'misago.users.permissions.profiles',
'misago.users.permissions.warnings',
'misago.users.permissions.moderation',
'misago.users.permissions.delete',
'misago.forums.permissions',
'misago.threads.permissions.threads',
'misago.threads.permissions.privatethreads',
)
MISAGO_MARKUP_EXTENSIONS = ()
MISAGO_POSTING_MIDDLEWARES = (
# Note: always keep FloodProtectionMiddleware middleware first one
'misago.threads.posting.floodprotection.FloodProtectionMiddleware',
'misago.threads.posting.reply.ReplyFormMiddleware',
'misago.threads.posting.participants.ThreadParticipantsFormMiddleware',
'misago.threads.posting.threadlabel.ThreadLabelFormMiddleware',
'misago.threads.posting.threadpin.ThreadPinFormMiddleware',
'misago.threads.posting.threadclose.ThreadCloseFormMiddleware',
'misago.threads.posting.recordedit.RecordEditMiddleware',
'misago.threads.posting.updatestats.UpdateStatsMiddleware',
# Note: always keep SaveChangesMiddleware middleware last one
'misago.threads.posting.savechanges.SaveChangesMiddleware',
)
MISAGO_THREAD_TYPES = (
# category and redirect types
'misago.forums.forumtypes.RootCategory',
'misago.forums.forumtypes.Category',
'misago.forums.forumtypes.Redirect',
# real thread types
'misago.threads.threadtypes.forumthread.ForumThread',
'misago.threads.threadtypes.privatethread.PrivateThread',
'misago.threads.threadtypes.report.Report',
)
# Register Misago directories
LOCALE_PATHS = (
os.path.join(MISAGO_BASE_DIR, 'locale'),
)
STATICFILES_DIRS = (
os.path.join(MISAGO_BASE_DIR, 'static'),
)
TEMPLATE_DIRS = (
os.path.join(MISAGO_BASE_DIR, 'templates'),
)
# Internationalization
USE_I18N = True
USE_L10N = True
USE_TZ = True
TIME_ZONE = 'UTC'
# Misago specific date formats
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MISAGO_COMPACT_DATE_FORMAT_DAY_MONTH = 'j M'
MISAGO_COMPACT_DATE_FORMAT_DAY_MONTH_YEAR = 'M \'y'
# Use Misago CSRF Failure Page
CSRF_FAILURE_VIEW = 'misago.core.errorpages.csrf_failure'
# Use Misago authentication
AUTH_USER_MODEL = 'misago_users.User'
AUTHENTICATION_BACKENDS = (
'misago.users.authbackends.MisagoBackend',
)
MISAGO_NEW_REGISTRATIONS_VALIDATORS = (
'misago.users.validators.validate_gmail_email',
'misago.users.validators.validate_with_sfs',
)
MISAGO_STOP_FORUM_SPAM_USE = True
MISAGO_STOP_FORUM_SPAM_MIN_CONFIDENCE = 80
# How many e-mails should be sent in a single step.
# This is used for conserving memory usage when mailing many users at the same time
MISAGO_MAILER_BATCH_SIZE = 20
# Auth paths
MISAGO_LOGIN_API_URL = 'auth'
LOGIN_REDIRECT_URL = 'misago:index'
LOGIN_URL = 'misago:login'
LOGOUT_URL = 'misago:logout'
# Misago Admin Path
# Omit starting and trailing slashes
# To disable Misago admin, empty this value
MISAGO_ADMIN_PATH = 'admincp'
# Admin urls namespaces that Misago's AdminAuthMiddleware should protect
MISAGO_ADMIN_NAMESPACES = (
'admin',
'misago:admin',
)
# How long (in minutes) since previous request to admin namespace should
# admin session last.
MISAGO_ADMIN_SESSION_EXPIRATION = 60
# Max age of notifications in days
# Notifications older than this are deleted
# On very active forums it's better to keep this smaller
MISAGO_NOTIFICATIONS_MAX_AGE = 40
# Fail-safe limits in case forum is raided by spambot
# No user may exceed those limits, however you may disable
# them by changing them to 0
MISAGO_DIALY_POST_LIMIT = 600
MISAGO_HOURLY_POST_LIMIT = 100
# Function used for generating individual avatar for user
MISAGO_DYNAMIC_AVATAR_DRAWER = 'misago.users.avatars.dynamic.draw_default'
# For which sizes avatars should be cached
# Keep sizes ordered from greatest to smallest
# Max size also controls min size of uploaded image as well as crop size
MISAGO_AVATARS_SIZES = (200, 150, 100, 64, 50, 30, 20)
# Path to avatar server
# This path is used to detect avatar requests, which bypass most of
# Request/Response processing for performance reasons
MISAGO_AVATAR_SERVER_PATH = '/user-avatar'
# Number of posts displayed on single thread page
MISAGO_POSTS_PER_PAGE = 15
MISAGO_THREAD_TAIL = 7
# Controls the max age in days of items that Misago processes to build rankings
# Used for the active posters and most liked users lists
# If your forum runs out of memory when trying to generate the user rankings list,
# or you want those lists to be more dynamic, give this setting a lower value
# You don't have to be overzealous with this, as user rankings are cached for 24h
MISAGO_RANKING_LENGTH = 30
# Controls max number of items displayed on ranked lists
MISAGO_RANKING_SIZE = 50
# Controls max number of items displayed on online list
MISAGO_ONLINE_LIST_SIZE = 50
# For how long the online list should be cached (in seconds)
MISAGO_ONLINE_LIST_CACHE = 40
# Controls the number of users displayed on a single page
MISAGO_USERS_PER_PAGE = 12
# Controls the amount of data used for new threads/replies lists
# Only unread threads younger than the number of days specified in this setting
# will be considered fresh for the "new threads" list
# Only unread threads with a last reply younger than the number of days specified
# here will be considered fresh for the "Threads with unread replies" list
MISAGO_FRESH_CONTENT_PERIOD = 40
# Number of minutes between updates of new content counts like new threads,
# unread replies or moderated threads/posts
MISAGO_CONTENT_COUNTING_FREQUENCY = 5
# X-Sendfile
# X-Sendfile is a feature provided by HTTP servers that allows web apps to
# delegate serving files to the better-performing server instead of
# doing it within the app.
# If your server supports X-Sendfile or a variation of it, enter the header name here.
# For example, if you are using Nginx with X-accel enabled, set this setting
# to "X-Accel-Redirect".
# Leave this setting empty to use the Django fallback instead
MISAGO_SENDFILE_HEADER = ''
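# Illustrative overrides for a deployment settings.py (values depend on your
# server and are assumptions, not defaults):
# MISAGO_SENDFILE_HEADER = 'X-Accel-Redirect'  # Nginx with X-accel
# MISAGO_SENDFILE_HEADER = 'X-Sendfile'        # Apache with mod_xsendfile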
# Allows you to use the location feature of your HTTP server
# For example, if you have an internal location /mymisago/avatar_cache/
# that points at /home/myweb/misagoforum/avatar_cache/, set this setting
# to "mymisago".
MISAGO_SENDFILE_LOCATIONS_PATH = ''
# Default forms templates
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Ember-CLI dev server address, used for rewriting redirects in development
# If you are using a different port to run the Ember-CLI dev server,
# override this setting in your settings.py.
MISAGO_EMBER_CLI_ORIGIN = 'http://localhost:4200'
# Rest Framework Configuration
REST_FRAMEWORK = {
'UNAUTHENTICATED_USER': 'misago.users.models.AnonymousUser',
'EXCEPTION_HANDLER': 'misago.core.exceptionhandler.handle_api_exception',
'DEFAULT_PERMISSION_CLASSES': (
'misago.users.rest_permissions.IsAuthenticatedOrReadOnly',
)
}
# Register Misago debug panels
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'misago.acl.panels.MisagoACLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
)
|
390910131/Misago
|
misago/conf/defaults.py
|
Python
|
gpl-2.0
| 12,306
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
lassplit.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from sextante.lidar.lastools.LasToolsUtils import LasToolsUtils
from sextante.lidar.lastools.LasToolsAlgorithm import LasToolsAlgorithm
from sextante.parameters.ParameterFile import ParameterFile
from sextante.outputs.OutputFile import OutputFile
from sextante.parameters.ParameterNumber import ParameterNumber
class lassplit(LasToolsAlgorithm):
INPUT = "INPUT"
OUTPUT = "OUTPUT"
NUM_POINTS = "NUM_POINTS"
def defineCharacteristics(self):
self.name = "lassplit"
self.group = "Tools"
self.addParameter(ParameterFile(lassplit.INPUT, "Input las layer"))
self.addParameter(ParameterNumber(lassplit.NUM_POINTS, "Points in each output file", 1, None, 1000000))
self.addOutput(OutputFile(lassplit.OUTPUT, "Output las file basename"))
self.addCommonParameters()
def processAlgorithm(self, progress):
commands = [os.path.join(LasToolsUtils.LasToolsPath(), "bin", "lassplit.exe")]
commands.append("-i")
commands.append(self.getParameterValue(lassplit.INPUT))
commands.append("-o")
commands.append(self.getOutputValue(lassplit.OUTPUT))
commands.append("-split")
commands.append(str(self.getParameterValue(lassplit.NUM_POINTS)))  # cast to str so the command list joins cleanly
self.addCommonParameterValuesToCommand(commands)
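# At this point commands holds the full invocation; an assumed example (paths
# and file names are hypothetical):
# ['C:/lastools/bin/lassplit.exe', '-i', 'input.las', '-o', 'output.las', '-split', '1000000', ...]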
LasToolsUtils.runLasTools(commands, progress)
|
slarosa/QGIS
|
python/plugins/sextante/lidar/lastools/lassplit.py
|
Python
|
gpl-2.0
| 2,455
|
#!/usr/bin/python
import sys
import csv
import lxml.etree as ET
# This script creates a CSV file from an XCCDF file formatted in the
# structure of a STIG. This should enable its ingestion into VMS,
# as well as its comparison with VMS output.
xccdf_ns = "http://checklists.nist.gov/xccdf/1.1"
disa_cciuri = "http://iase.disa.mil/stigs/cci/Pages/index.aspx"
disa_srguri = "http://iase.disa.mil/stigs/srgs/Pages/index.aspx"
def parse_xml_file(xmlfile):
with open(xmlfile, 'r') as xml_file:
filestring = xml_file.read()
tree = ET.fromstring(filestring)
return tree
def reflist(refs):
refstring = ', '.join(refs)
return refstring
def node_to_text(node):
textslist = node.xpath(".//text()")
return ''.join(textslist)
def main():
if len(sys.argv) < 2:
print "Provide an XCCDF file to convert into a CSV file."
sys.exit(1)
xccdffile = sys.argv[1]
xccdftree = parse_xml_file(xccdffile)
rules = xccdftree.findall(".//{%s}Rule" % xccdf_ns)
rulewriter = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)
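# Each Rule becomes one quoted CSV row with the columns:
# CCI references, SRG references, title, description, fix text, check text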
for rule in rules:
cci_refs = [ref.text for ref in rule.findall("{%s}ident[@system='%s']"
% (xccdf_ns, disa_cciuri))]
srg_refs = [ref.text for ref in rule.findall("{%s}ident[@system='%s']"
% (xccdf_ns, disa_srguri))]
title = rule.find("{%s}title" % xccdf_ns).text
description = node_to_text(rule.find("{%s}description" % xccdf_ns))
fixtext = node_to_text(rule.find("{%s}fixtext" % xccdf_ns))
checktext = node_to_text(rule.find(".//{%s}check-content" % xccdf_ns))
row = [reflist(cci_refs), reflist(srg_refs), title, description, fixtext, checktext]
rulewriter.writerow(row)
sys.exit(0)
if __name__ == "__main__":
main()
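# Example invocation (the input file name is hypothetical):
#   python xccdf2csv_stig_module.py ssg-rhel6-xccdf.xml > stig.csv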
|
mpreisler/scap-security-guide-debian
|
scap-security-guide-0.1.21/shared/modules/xccdf2csv_stig_module.py
|
Python
|
gpl-2.0
| 1,883
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
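# Returning True above tells pybindgen the error was handled, so wrappers that
# fail to generate are skipped with a warning instead of aborting the run.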
import sys
def module_init():
root_module = Module('ns.topology_read', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator', u'ns3::AttributeConstructionList::CIterator')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator*', u'ns3::AttributeConstructionList::CIterator*')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator&', u'ns3::AttributeConstructionList::CIterator&')
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Packet> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Packet'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )', u'ns3::Mac48Address::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )*', u'ns3::Mac48Address::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )&', u'ns3::Mac48Address::TracedCallback&')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## mac8-address.h (module 'network'): ns3::Mac8Address [class]
module.add_class('Mac8Address', import_from_module='ns.network')
## mac8-address.h (module 'network'): ns3::Mac8Address [class]
root_module['ns3::Mac8Address'].implicitly_converts_to(root_module['ns3::Address'])
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::ItemType [enumeration]
module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## topology-reader-helper.h (module 'topology-read'): ns3::TopologyReaderHelper [class]
module.add_class('TopologyReaderHelper')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
typehandlers.add_type_alias(u'uint32_t', u'ns3::TypeId::hash_t')
typehandlers.add_type_alias(u'uint32_t*', u'ns3::TypeId::hash_t*')
typehandlers.add_type_alias(u'uint32_t&', u'ns3::TypeId::hash_t&')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## topology-reader.h (module 'topology-read'): ns3::TopologyReader [class]
module.add_class('TopologyReader', parent=root_module['ns3::Object'])
## topology-reader.h (module 'topology-read'): ns3::TopologyReader::Link [class]
module.add_class('Link', outer_class=root_module['ns3::TopologyReader'])
typehandlers.add_type_alias(u'std::map< std::string, std::string > const_iterator', u'ns3::TopologyReader::Link::ConstAttributesIterator')
typehandlers.add_type_alias(u'std::map< std::string, std::string > const_iterator*', u'ns3::TopologyReader::Link::ConstAttributesIterator*')
typehandlers.add_type_alias(u'std::map< std::string, std::string > const_iterator&', u'ns3::TopologyReader::Link::ConstAttributesIterator&')
typehandlers.add_type_alias(u'std::list< ns3::TopologyReader::Link > const_iterator', u'ns3::TopologyReader::ConstLinksIterator')
typehandlers.add_type_alias(u'std::list< ns3::TopologyReader::Link > const_iterator*', u'ns3::TopologyReader::ConstLinksIterator*')
typehandlers.add_type_alias(u'std::list< ns3::TopologyReader::Link > const_iterator&', u'ns3::TopologyReader::ConstLinksIterator&')
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## inet-topology-reader.h (module 'topology-read'): ns3::InetTopologyReader [class]
module.add_class('InetTopologyReader', parent=root_module['ns3::TopologyReader'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
typehandlers.add_type_alias(u'void ( * ) ( )', u'ns3::NetDevice::LinkChangeTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( )*', u'ns3::NetDevice::LinkChangeTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( )&', u'ns3::NetDevice::LinkChangeTracedCallback&')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::NetDevice::ReceiveCallback')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::NetDevice::ReceiveCallback*')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::NetDevice::ReceiveCallback&')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', u'ns3::NetDevice::PromiscReceiveCallback')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::NetDevice::PromiscReceiveCallback*')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::NetDevice::PromiscReceiveCallback&')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Node::ProtocolHandler')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Node::ProtocolHandler*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Node::ProtocolHandler&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Node::DeviceAdditionListener')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Node::DeviceAdditionListener*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Node::DeviceAdditionListener&')
## orbis-topology-reader.h (module 'topology-read'): ns3::OrbisTopologyReader [class]
module.add_class('OrbisTopologyReader', parent=root_module['ns3::TopologyReader'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )', u'ns3::Packet::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )*', u'ns3::Packet::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )&', u'ns3::Packet::TracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )', u'ns3::Packet::AddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )*', u'ns3::Packet::AddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )&', u'ns3::Packet::AddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )', u'ns3::Packet::TwoAddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )*', u'ns3::Packet::TwoAddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )&', u'ns3::Packet::TwoAddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )', u'ns3::Packet::Mac48AddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )*', u'ns3::Packet::Mac48AddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )&', u'ns3::Packet::Mac48AddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )', u'ns3::Packet::SizeTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )*', u'ns3::Packet::SizeTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )&', u'ns3::Packet::SizeTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )', u'ns3::Packet::SinrTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )*', u'ns3::Packet::SinrTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )&', u'ns3::Packet::SinrTracedCallback&')
## rocketfuel-topology-reader.h (module 'topology-read'): ns3::RocketfuelTopologyReader [class]
module.add_class('RocketfuelTopologyReader', parent=root_module['ns3::TopologyReader'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_container('std::map< std::string, std::string >', ('std::string', 'std::string'), container_type=u'map')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, std::size_t const )', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, std::size_t const )*', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, std::size_t const )&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, std::size_t const )', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, std::size_t const )*', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, std::size_t const )&', u'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, root_module['ns3::DefaultDeleter< ns3::NixVector >'])
register_Ns3DefaultDeleter__Ns3Packet_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Packet >'])
register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3Mac8Address_methods(root_module, root_module['ns3::Mac8Address'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TopologyReaderHelper_methods(root_module, root_module['ns3::TopologyReaderHelper'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3TopologyReader_methods(root_module, root_module['ns3::TopologyReader'])
register_Ns3TopologyReaderLink_methods(root_module, root_module['ns3::TopologyReader::Link'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3InetTopologyReader_methods(root_module, root_module['ns3::InetTopologyReader'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3OrbisTopologyReader_methods(root_module, root_module['ns3::OrbisTopologyReader'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3RocketfuelTopologyReader_methods(root_module, root_module['ns3::RocketfuelTopologyReader'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_output_stream_operator()
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'ns3::AttributeConstructionList::CIterator',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'ns3::AttributeConstructionList::CIterator',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
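# Usage sketch (hypothetical, assuming the bindings are importable as `ns.network`;
# the exact package name depends on how the bindings are built): the registrations
# above expose ns3::Buffer, so a round trip through the wrapped API looks like
#
#     buf = ns.network.Buffer(16)     # 16 bytes of zero-initialized data
#     it = buf.Begin()                # Buffer.Iterator positioned at the start
#     it.WriteHtonU16(0x0800)         # 16-bit value in network byte order
#     print(buf.GetSize())            # -> 16
#
# Each call maps one-to-one onto a C++ method registered with add_method above.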
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetRemainingSize() const [member function]
cls.add_method('GetRemainingSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
cls.add_method('PeekU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
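# Note on the Read*/Write* families registered above: ReadNtohU16/WriteHtonU16 (and the
# U32/U64 variants) convert between network byte order (big-endian) and host order,
# ReadLsbtohU16/WriteHtolsbU16 handle little-endian wire formats, and the plain
# ReadU16/WriteU16 use host byte order unchanged.  CalculateIpChecksum computes the
# standard 16-bit Internet checksum over `size` bytes starting at the iterator position.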
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function]
cls.add_method('Adjust',
'void',
[param('int32_t', 'adjustment')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
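# Note: ByteTagList stores tags associated with byte ranges of a packet.  Add() reserves
# bufferSize bytes for a tag of the given TypeId covering [start, end), AddAtStart()/
# AddAtEnd() adjust the stored ranges when bytes are added or removed at the ends of the
# packet, and Adjust() shifts every stored offset by the given amount.  Iteration uses
# the HasNext()/Next() pattern seen throughout these bindings.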
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
def register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeAccessor> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeAccessor > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeAccessor>::Delete(ns3::AttributeAccessor * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeAccessor *', 'object')],
is_static=True)
return
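# Note: this and the register_Ns3DefaultDeleter__* functions that follow wrap
# instantiations of the same ns3::DefaultDeleter<T> template, whose static Delete()
# simply calls `delete` on the pointer it is given; only the template argument differs.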
def register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeChecker> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeChecker > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeChecker>::Delete(ns3::AttributeChecker * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeChecker *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeValue> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeValue > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeValue>::Delete(ns3::AttributeValue * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeValue *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter(ns3::DefaultDeleter<ns3::CallbackImplBase> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::CallbackImplBase > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::CallbackImplBase>::Delete(ns3::CallbackImplBase * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::CallbackImplBase *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter(ns3::DefaultDeleter<ns3::Hash::Implementation> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::Hash::Implementation > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::Hash::Implementation>::Delete(ns3::Hash::Implementation * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Hash::Implementation *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector>::DefaultDeleter(ns3::DefaultDeleter<ns3::NixVector> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::NixVector > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::NixVector>::Delete(ns3::NixVector * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::NixVector *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3Packet_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Packet>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Packet>::DefaultDeleter(ns3::DefaultDeleter<ns3::Packet> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::Packet > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::Packet>::Delete(ns3::Packet * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Packet *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::TraceSourceAccessor> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::TraceSourceAccessor > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::TraceSourceAccessor>::Delete(ns3::TraceSourceAccessor * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::TraceSourceAccessor *', 'object')],
is_static=True)
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
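# Usage sketch (hypothetical, assuming an `ns.core` import path): ns3::Hasher wraps a
# pluggable ns3::Hash::Implementation, so the wrapped class can be used roughly as
#
#     h = ns.core.Hasher()              # default hash implementation
#     h32 = h.GetHash32("some string")  # 32-bit hash of the string
#     h.clear()                         # reset state so the hasher can be reused
#
# GetHash32/GetHash64 also accept a (buffer, size) pair for raw byte ranges.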
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
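# Usage sketch (hypothetical, assuming `ns.network`): together with the ==, != and <
# operators and the output-stream operator registered at the top of this function,
# Ipv4Address becomes comparable and printable from Python:
#
#     a = ns.network.Ipv4Address("10.1.1.1")
#     m = ns.network.Ipv4Mask("255.255.255.0")
#     net = a.CombineMask(m)                     # -> 10.1.1.0
#     bcast = a.GetSubnetDirectedBroadcast(m)    # -> 10.1.1.255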
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
cls.add_method('IsAllHostsMulticast',
'bool',
[],
deprecated=True, is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
cls.add_method('IsDocumentation',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac8Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac8Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac8Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac8Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
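# Note: the MakeAutoconfigured*() helpers registered above build SLAAC-style addresses
# by combining a MAC-layer address (Mac16/Mac48/Mac64/Mac8) with either a caller-supplied
# prefix or, for the LinkLocal variants, the fe80::/64 prefix.  IsAllHostsMulticast() is
# registered with deprecated=True because the underlying ns-3 header marks it deprecated.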
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
return
def register_Ns3Mac48Address_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_output_stream_operator()
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
cls.add_constructor([param('char const *', 'str')])
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac48Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'buffer')])
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'buffer')],
is_const=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv4Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv6Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
cls.add_method('GetMulticast6Prefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
cls.add_method('GetMulticastPrefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
cls.add_method('IsGroup',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
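# Usage sketch (hypothetical, assuming `ns.network`): Mac48Address wraps a 6-byte EUI-48
# address; Allocate() hands out sequentially increasing addresses (handy for tests) and
# GetMulticast() maps an IPv4 or IPv6 multicast group onto the corresponding Ethernet
# multicast MAC address.
#
#     mac = ns.network.Mac48Address("00:00:00:00:00:01")
#     print(mac.IsBroadcast())    # -> False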
def register_Ns3Mac8Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address(ns3::Mac8Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac8Address const &', 'arg0')])
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address() [constructor]
cls.add_constructor([])
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address(uint8_t addr) [constructor]
cls.add_constructor([param('uint8_t', 'addr')])
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac8Address',
[],
is_static=True)
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac8Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac8-address.h (module 'network'): void ns3::Mac8Address::CopyFrom(uint8_t const * pBuffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'pBuffer')])
## mac8-address.h (module 'network'): void ns3::Mac8Address::CopyTo(uint8_t * pBuffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'pBuffer')],
is_const=True)
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac8Address',
[],
is_static=True)
## mac8-address.h (module 'network'): static bool ns3::Mac8Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
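# Note: ObjectBase is the root of ns-3's attribute and trace-source system, so the
# Get/SetAttribute and TraceConnect* methods registered above are inherited by every
# ns-3 Object exposed through these bindings.  The *FailSafe variants return a bool
# instead of aborting when the named attribute or trace source does not exist.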
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
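# Note: packet metadata recording is disabled by default in ns-3; the static Enable()
# (or EnableChecking()) method registered above must be called before the first packet
# is created for the per-packet header/trailer history to be available.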
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::type [variable]
cls.add_instance_attribute('type', 'ns3::PacketMetadata::Item::ItemType', is_const=False)
return
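# Note: the currentTrimedFromStart/currentTrimedFromEnd attribute names above mirror the
# member names in ns-3's packet-metadata.h (including their spelling); they record how
# many bytes of the original header or trailer were trimmed away when the item describes
# a fragment of a packet.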
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
cls.add_method('Replace',
'bool',
[param('ns3::Tag &', 'tag')])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 1 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
return
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t v) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t v) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
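# Illustrative note only: TagBuffer reads must mirror the order and widths of
# the writes that produced the buffer. A Tag whose Serialize() does
#     buf.WriteU32(seq); buf.WriteDouble(ts)
# must have Deserialize() call ReadU32() and then ReadDouble() in that same
# order. Whether such a Tag can be subclassed directly from Python depends on
# the proxy classes pybindgen generates for ns3::Tag, so no executable example
# is given here.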
def register_Ns3TopologyReaderHelper_methods(root_module, cls):
## topology-reader-helper.h (module 'topology-read'): ns3::TopologyReaderHelper::TopologyReaderHelper(ns3::TopologyReaderHelper const & arg0) [constructor]
cls.add_constructor([param('ns3::TopologyReaderHelper const &', 'arg0')])
## topology-reader-helper.h (module 'topology-read'): ns3::TopologyReaderHelper::TopologyReaderHelper() [constructor]
cls.add_constructor([])
## topology-reader-helper.h (module 'topology-read'): ns3::Ptr<ns3::TopologyReader> ns3::TopologyReaderHelper::GetTopologyReader() [member function]
cls.add_method('GetTopologyReader',
'ns3::Ptr< ns3::TopologyReader >',
[])
## topology-reader-helper.h (module 'topology-read'): void ns3::TopologyReaderHelper::SetFileName(std::string const fileName) [member function]
cls.add_method('SetFileName',
'void',
[param('std::string const', 'fileName')])
## topology-reader-helper.h (module 'topology-read'): void ns3::TopologyReaderHelper::SetFileType(std::string const fileType) [member function]
cls.add_method('SetFileType',
'void',
[param('std::string const', 'fileType')])
return
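# Illustrative sketch only: the helper registered above is typically driven the
# way ns-3's topology-read example scripts do. The 'ns.topology_read' import
# path and the 'Inet' file-type string are assumptions for illustration.
def _example_read_topology(path):
    import ns.topology_read
    helper = ns.topology_read.TopologyReaderHelper()
    helper.SetFileType('Inet')            # also accepts 'Orbis' or 'Rocketfuel'
    helper.SetFileName(path)
    reader = helper.GetTopologyReader()   # concrete reader chosen by file type
    return reader.Read()                  # NodeContainer of nodes created from the file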
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(std::size_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(std::size_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::hash_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'ns3::TypeId::hash_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint16_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint16_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint16_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint16_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(std::size_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(ns3::TypeId::hash_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(ns3::TypeId::hash_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(std::size_t i, ns3::Ptr<const ns3::AttributeValue> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('std::size_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'uid')])
return
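# Illustrative sketch only: the static and const accessors registered above
# allow TypeId introspection from Python. The 'ns.core' import path and the
# 'ns3::UdpSocketFactory' type name are assumptions, not guaranteed by this file.
def _example_inspect_type_id():
    import ns.core
    tid = ns.core.TypeId.LookupByName('ns3::UdpSocketFactory')
    # GetAttributeN()/GetAttribute(i) walk the attributes recorded for the type;
    # '.name' is the AttributeInformation field bound further below.
    names = [tid.GetAttribute(i).name for i in range(tid.GetAttributeN())]
    return tid.GetName(), names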
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')],
is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<const ns3::Object> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
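# Illustrative sketch only: aggregation lookups from Python usually go through
# GetObject, but the AggregateIterator bound above can also enumerate everything
# aggregated to an Object, using the GetAggregateIterator() method registered earlier.
def _example_list_aggregates(obj):
    it = obj.GetAggregateIterator()
    found = []
    while it.HasNext():
        # Next() yields Ptr<const Object>; GetInstanceTypeId() is bound above.
        found.append(it.Next().GetInstanceTypeId().GetName())
    return found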
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
return
def register_Ns3TopologyReader_methods(root_module, cls):
## topology-reader.h (module 'topology-read'): ns3::TopologyReader::TopologyReader() [constructor]
cls.add_constructor([])
## topology-reader.h (module 'topology-read'): void ns3::TopologyReader::AddLink(ns3::TopologyReader::Link link) [member function]
cls.add_method('AddLink',
'void',
[param('ns3::TopologyReader::Link', 'link')])
## topology-reader.h (module 'topology-read'): std::string ns3::TopologyReader::GetFileName() const [member function]
cls.add_method('GetFileName',
'std::string',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): static ns3::TypeId ns3::TopologyReader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## topology-reader.h (module 'topology-read'): ns3::TopologyReader::ConstLinksIterator ns3::TopologyReader::LinksBegin() const [member function]
cls.add_method('LinksBegin',
'ns3::TopologyReader::ConstLinksIterator',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): bool ns3::TopologyReader::LinksEmpty() const [member function]
cls.add_method('LinksEmpty',
'bool',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): ns3::TopologyReader::ConstLinksIterator ns3::TopologyReader::LinksEnd() const [member function]
cls.add_method('LinksEnd',
'ns3::TopologyReader::ConstLinksIterator',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): int ns3::TopologyReader::LinksSize() const [member function]
cls.add_method('LinksSize',
'int',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): ns3::NodeContainer ns3::TopologyReader::Read() [member function]
cls.add_method('Read',
'ns3::NodeContainer',
[],
is_pure_virtual=True, is_virtual=True)
## topology-reader.h (module 'topology-read'): void ns3::TopologyReader::SetFileName(std::string const & fileName) [member function]
cls.add_method('SetFileName',
'void',
[param('std::string const &', 'fileName')])
return
def register_Ns3TopologyReaderLink_methods(root_module, cls):
## topology-reader.h (module 'topology-read'): ns3::TopologyReader::Link::Link(ns3::TopologyReader::Link const & arg0) [constructor]
cls.add_constructor([param('ns3::TopologyReader::Link const &', 'arg0')])
## topology-reader.h (module 'topology-read'): ns3::TopologyReader::Link::Link(ns3::Ptr<ns3::Node> fromPtr, std::string const & fromName, ns3::Ptr<ns3::Node> toPtr, std::string const & toName) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'fromPtr'), param('std::string const &', 'fromName'), param('ns3::Ptr< ns3::Node >', 'toPtr'), param('std::string const &', 'toName')])
## topology-reader.h (module 'topology-read'): ns3::TopologyReader::Link::ConstAttributesIterator ns3::TopologyReader::Link::AttributesBegin() const [member function]
cls.add_method('AttributesBegin',
'ns3::TopologyReader::Link::ConstAttributesIterator',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): ns3::TopologyReader::Link::ConstAttributesIterator ns3::TopologyReader::Link::AttributesEnd() const [member function]
cls.add_method('AttributesEnd',
'ns3::TopologyReader::Link::ConstAttributesIterator',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): std::string ns3::TopologyReader::Link::GetAttribute(std::string const & name) const [member function]
cls.add_method('GetAttribute',
'std::string',
[param('std::string const &', 'name')],
is_const=True)
## topology-reader.h (module 'topology-read'): bool ns3::TopologyReader::Link::GetAttributeFailSafe(std::string const & name, std::string & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string const &', 'name'), param('std::string &', 'value')],
is_const=True)
## topology-reader.h (module 'topology-read'): ns3::Ptr<ns3::Node> ns3::TopologyReader::Link::GetFromNode() const [member function]
cls.add_method('GetFromNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): std::string ns3::TopologyReader::Link::GetFromNodeName() const [member function]
cls.add_method('GetFromNodeName',
'std::string',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): ns3::Ptr<ns3::Node> ns3::TopologyReader::Link::GetToNode() const [member function]
cls.add_method('GetToNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): std::string ns3::TopologyReader::Link::GetToNodeName() const [member function]
cls.add_method('GetToNodeName',
'std::string',
[],
is_const=True)
## topology-reader.h (module 'topology-read'): void ns3::TopologyReader::Link::SetAttribute(std::string const & name, std::string const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string const &', 'name'), param('std::string const &', 'value')])
return
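# Illustrative sketch only: the per-link accessors registered above expose the
# data parsed from a topology file. Iterating links from Python (LinksBegin/
# LinksEnd) depends on container bindings, so this sketch takes a single Link.
def _example_describe_link(link):
    # End-point names come straight from the topology file.
    return link.GetFromNodeName(), link.GetToNodeName()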
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')],
is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<const ns3::CallbackImplBase> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'ns3::ObjectBase*'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'void'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::NetDevice> '])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::Packet const> '])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'unsigned short'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'ns3::Address const&'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'ns3::NetDevice::PacketType'])
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3InetTopologyReader_methods(root_module, cls):
## inet-topology-reader.h (module 'topology-read'): static ns3::TypeId ns3::InetTopologyReader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## inet-topology-reader.h (module 'topology-read'): ns3::InetTopologyReader::InetTopologyReader() [constructor]
cls.add_constructor([])
## inet-topology-reader.h (module 'topology-read'): ns3::NodeContainer ns3::InetTopologyReader::Read() [member function]
cls.add_method('Read',
'ns3::NodeContainer',
[],
is_virtual=True)
return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Mac48Address',
[],
is_const=True)
## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Mac48Address const &', 'value')])
return
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::NetDevice::PromiscReceiveCallback cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::NetDevice::ReceiveCallback cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3Node_methods(root_module, cls):
## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [constructor]
cls.add_constructor([param('ns3::Node const &', 'arg0')])
## node.h (module 'network'): ns3::Node::Node() [constructor]
cls.add_constructor([])
## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
cls.add_constructor([param('uint32_t', 'systemId')])
## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
cls.add_method('AddApplication',
'uint32_t',
[param('ns3::Ptr< ns3::Application >', 'application')])
## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddDevice',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
cls.add_method('ChecksumEnabled',
'bool',
[],
is_static=True)
## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
cls.add_method('GetApplication',
'ns3::Ptr< ns3::Application >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): ns3::Time ns3::Node::GetLocalTime() const [member function]
cls.add_method('GetLocalTime',
'ns3::Time',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
cls.add_method('GetNApplications',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Node::DeviceAdditionListener listener) [member function]
cls.add_method('RegisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Node::ProtocolHandler handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
cls.add_method('RegisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Node::DeviceAdditionListener listener) [member function]
cls.add_method('UnregisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Node::ProtocolHandler handler) [member function]
cls.add_method('UnregisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3OrbisTopologyReader_methods(root_module, cls):
## orbis-topology-reader.h (module 'topology-read'): static ns3::TypeId ns3::OrbisTopologyReader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## orbis-topology-reader.h (module 'topology-read'): ns3::OrbisTopologyReader::OrbisTopologyReader() [constructor]
cls.add_constructor([])
## orbis-topology-reader.h (module 'topology-read'): ns3::NodeContainer ns3::OrbisTopologyReader::Read() [member function]
cls.add_method('Read',
'ns3::NodeContainer',
[],
is_virtual=True)
return
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header, uint32_t size) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header'), param('uint32_t', 'size')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
cls.add_method('ReplacePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
cls.add_method('ToString',
'std::string',
[],
is_const=True)
return
def register_Ns3RocketfuelTopologyReader_methods(root_module, cls):
## rocketfuel-topology-reader.h (module 'topology-read'): static ns3::TypeId ns3::RocketfuelTopologyReader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## rocketfuel-topology-reader.h (module 'topology-read'): ns3::RocketfuelTopologyReader::RocketfuelTopologyReader() [constructor]
cls.add_constructor([])
## rocketfuel-topology-reader.h (module 'topology-read'): ns3::NodeContainer ns3::RocketfuelTopologyReader::Read() [member function]
cls.add_method('Read',
'ns3::NodeContainer',
[],
is_virtual=True)
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
def register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): ns3::ObjectBase * ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()() [member operator]
cls.add_method('operator()',
'ns3::ObjectBase *',
[],
is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
return
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<ns3::NetDevice> arg0, ns3::Ptr<const ns3::Packet> arg1, short unsigned int arg2, ns3::Address const & arg3, ns3::Address const & arg4, ns3::NetDevice::PacketType arg5) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'arg0'), param('ns3::Ptr< ns3::Packet const >', 'arg1'), param('short unsigned int', 'arg2'), param('ns3::Address const &', 'arg3'), param('ns3::Address const &', 'arg4'), param('ns3::NetDevice::PacketType', 'arg5')],
is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
return
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<ns3::NetDevice> arg0) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'arg0')],
is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.add_cpp_namespace('FatalImpl'), root_module)
register_functions_ns3_Hash(module.add_cpp_namespace('Hash'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.add_cpp_namespace('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
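# Editor's note: the sketch below is illustrative only and is not part of the
# generated ns-3 bindings file.  Assuming pybindgen is installed, it shows the
# bare pybindgen API that the register_*_methods() helpers above drive; the
# module, class and method names ('example', 'Widget', 'GetSize') are
# hypothetical.  The helper is never called, so defining it does not change
# what this script emits.
def _pybindgen_usage_sketch():
    import sys
    from pybindgen import Module, FileCodeSink
    mod = Module('example')                  # top-level extension module
    klass = mod.add_class('Widget')          # wrap a (hypothetical) C++ class
    klass.add_constructor([])                # default constructor
    klass.add_method('GetSize', 'unsigned int', [], is_const=True)
    mod.generate(FileCodeSink(sys.stdout))   # emit the C++ wrapper code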
|
tomhenderson/ns-3-dev-git
|
src/topology-read/bindings/modulegen__gcc_LP64.py
|
Python
|
gpl-2.0
| 251,206
|
version_info = (1, 4, 2)
__version__ = '.'.join(map(str, version_info))
|
Vdragon/git-cola
|
qtpy/_version.py
|
Python
|
gpl-2.0
| 72
|
# -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for WebJournal."""
__revision__ = \
"$Id$"
# pylint cds-invenio/modules/webjournal/lib/webjournal_tests.py
import unittest
from invenio.webjournal_utils import compare_issues
from invenio.webjournal import issue_is_later_than
#from invenio import webjournal_utils
from invenio.testutils import make_test_suite, run_test_suite
#from invenio.config import CFG_SITE_URL
class TestCompareIssues(unittest.TestCase):
"""Tests for comparing issues."""
def test_compare_issues(self):
"""webjournal - tests comparing issues"""
issue1 = '06/2009'
issue2 = '07/2009'
self.assertEqual(compare_issues(issue1, issue2), -1)
issue1 = '07/2009'
issue2 = '06/2009'
self.assertEqual(compare_issues(issue1, issue2), 1)
issue1 = '07/2009'
issue2 = '07/2009'
self.assertEqual(compare_issues(issue1, issue2), 0)
issue1 = '07/2009'
issue2 = '07/2008'
self.assertEqual(compare_issues(issue1, issue2), 1)
issue1 = '07/2008'
issue2 = '07/2009'
self.assertEqual(compare_issues(issue1, issue2), -1)
def test_issue1_is_later_than(self):
"""webjournal - tests comparing issue1 is later than issue2 """
issue1 = '07/2009'
issue2 = '07/2008'
self.assertEqual(issue_is_later_than(issue1, issue2), True)
issue1 = '07/2008'
issue2 = '07/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), False)
issue1 = '07/2009'
issue2 = '06/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), True)
issue1 = '06/2009'
issue2 = '07/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), False)
issue1 = '07/2009'
issue2 = '07/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), False)
TEST_SUITE = make_test_suite(TestCompareIssues)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
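# Editor's note: the helper below is an illustrative sketch only, not the real
# invenio compare_issues()/issue_is_later_than() implementation.  It mirrors
# the ordering the assertions above expect for 'NN/YYYY' issue strings (compare
# by year first, then by issue number) and is never called by the test suite.
def _issue_order_sketch(issue1, issue2):
    number1, year1 = [int(part) for part in issue1.split('/')]
    number2, year2 = [int(part) for part in issue2.split('/')]
    key1, key2 = (year1, number1), (year2, number2)
    return (key1 > key2) - (key1 < key2)   # -1, 0 or 1, like compare_issues()
# For example, _issue_order_sketch('07/2009', '06/2009') == 1 and
# _issue_order_sketch('07/2008', '07/2009') == -1.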
|
ppiotr/Bibedit-some-refactoring
|
modules/webjournal/lib/webjournal_tests.py
|
Python
|
gpl-2.0
| 2,817
|
import os
import sys
import datetime as dt
import json
from itertools import groupby
from kivy.properties import (StringProperty,
DictProperty,
ListProperty,
BooleanProperty)
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import Screen
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from core.bglabel import BGLabel
from MythTV import MythBE
EPOCH = dt.datetime(1970, 1, 1)
class MythRecording(BoxLayout):
"""Widget class for displaying information about upcoming recordings."""
rec = DictProperty({})
bg = ListProperty([0.1, 0.15, 0.15, 1])
def __init__(self, **kwargs):
super(MythRecording, self).__init__(**kwargs)
self.rec = kwargs["rec"]
class MythRecordingHeader(BGLabel):
"""Widget class for grouping recordings by day."""
rec_date = StringProperty("")
def __init__(self, **kwargs):
super(MythRecordingHeader, self).__init__(**kwargs)
self.bgcolour = [0.1, 0.1, 0.4, 1]
self.rec_date = kwargs["rec_date"]
class MythTVScreen(Screen):
"""Main screen class for MythTV schedule.
    Screen attempts to connect to the MythTV backend, retrieve the list of
    upcoming recordings and display it.
Data is cached so that information can still be viewed even if backend
is offline (e.g. for power saving purposes).
"""
backendonline = BooleanProperty(False)
isrecording = BooleanProperty(False)
def __init__(self, **kwargs):
super(MythTVScreen, self).__init__(**kwargs)
# Get the path for the folder
scr = sys.modules[self.__class__.__module__].__file__
        # Create variable to retain path to our cache file
self.screendir = os.path.dirname(scr)
self.cacheFile = os.path.join(self.screendir, "cache", "cache.json")
        # Some other useful variables
self.running = False
self.rec_timer = None
self.status_timer = None
self.be = None
self.recs = None
def on_enter(self):
# We only update when we enter the screen. No need for regular updates.
self.getRecordings()
self.drawScreen()
self.checkRecordingStatus()
def on_leave(self):
pass
def cacheRecs(self, recs):
"""Method to save local copy of recordings. Backend may not be online
all the time so a cache enables us to display recordings if if we
can't poll the server for an update.
"""
with open(self.cacheFile, 'w') as outfile:
json.dump(recs, outfile)
    def loadCache(self):
        """Retrieves cached recordings and returns them as a python list object."""
        try:
            # Use a context manager so the cache file is always closed.
            with open(self.cacheFile, 'r') as raw:
                recs = json.load(raw)
        except:
            recs = []
        return recs
def recs_to_dict(self, uprecs):
"""Converts the MythTV upcoming recording iterator into a list of
dict objects.
"""
raw_recs = []
recs = []
        # Turn the response into a dict object and add it to our list of recordings
for r in uprecs:
rec = {}
st = r.starttime
et = r.endtime
rec["title"] = r.title
rec["subtitle"] = r.subtitle if r.subtitle else ""
day = dt.datetime(st.year, st.month, st.day)
rec["day"] = (day - EPOCH).total_seconds()
rec["time"] = "{} - {}".format(st.strftime("%H:%M"),
et.strftime("%H:%M"))
rec["timestamp"] = (st - EPOCH).total_seconds()
rec["desc"] = r.description
raw_recs.append(rec)
# Group the recordings by day (so we can print a header)
for k, g in groupby(raw_recs, lambda x: x["day"]):
recs.append((k, list(g)))
return recs
def getRecordings(self):
"""Attempts to connect to MythTV backend and retrieve recordings."""
try:
# If we can connect then get recordings and save a local cache.
self.be = MythBE()
uprecs = self.be.getUpcomingRecordings()
self.recs = self.recs_to_dict(uprecs)
self.cacheRecs(self.recs)
self.backendonline = True
except:
            # Can't connect, so we need to set variables accordingly and try
# to load data from the cache.
self.be = None
self.recs = self.loadCache()
self.backendonline = False
def checkRecordingStatus(self):
"""Checks whether the backend is currently recording."""
try:
recbe = MythBE()
for recorder in recbe.getRecorderList():
if recbe.isRecording(recorder):
self.isrecording = True
break
except:
# If we can't connect to it then it can't be recording.
self.isrecording = False
def drawScreen(self):
"""Main method for rendering screen.
If there is recording data (live or cached) then is laid out in a
scroll view.
If not, the user is notified that the backend is unreachable.
"""
sv = self.ids.myth_scroll
sv.clear_widgets()
if self.recs:
# Create a child widget to hold the recordings.
self.sl = GridLayout(cols=1, size_hint=(1, None), spacing=2)
self.sl.bind(minimum_height=self.sl.setter('height'))
# Loop over the list of recordings.
for rec in self.recs:
# These are grouped by day so we need a header
day = dt.timedelta(0, rec[0]) + EPOCH
mrh = MythRecordingHeader(rec_date=day.strftime("%A %d %B"))
self.sl.add_widget(mrh)
# Then we loop over the recordings scheduled for that day
for r in rec[1]:
# and add them to the display.
mr = MythRecording(rec=r)
self.sl.add_widget(mr)
sv.add_widget(self.sl)
else:
lb = Label(text="Backend is unreachable and there is no cached"
" information")
sv.add_widget(lb)
|
9and3r/RPi-InfoScreen-Kivy
|
screens/mythtv/screen.py
|
Python
|
gpl-3.0
| 6,297
|
# urllib3/response.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import gzip
import logging
import zlib
from io import BytesIO
from .exceptions import HTTPError
try:
basestring = basestring
except NameError: # Python 3
basestring = (str, bytes)
log = logging.getLogger(__name__)
def decode_gzip(data):
gzipper = gzip.GzipFile(fileobj=BytesIO(data))
return gzipper.read()
def decode_deflate(data):
try:
return zlib.decompress(data)
except zlib.error:
return zlib.decompress(data, -zlib.MAX_WBITS)
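# Illustrative sketch (not part of the original module): some servers send
# "raw" deflate data without the zlib header, which is why decode_deflate
# retries with a negative window size. A minimal round-trip covering both
# branches, using only the standard library:
#
#   import zlib
#   payload = b"hello world"
#   wrapped = zlib.compress(payload)                    # zlib-wrapped deflate
#   co = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
#   raw = co.compress(payload) + co.flush()             # raw deflate stream
#   assert decode_deflate(wrapped) == payload
#   assert decode_deflate(raw) == payload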
class HTTPResponse(object):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
        If True, attempts to decode the body based on the content-encoding
        header (e.g. 'gzip' and 'deflate'); if False, the raw body is
        returned unchanged.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = {
'gzip': decode_gzip,
'deflate': decode_deflate,
}
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
self.headers = headers or {}
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self._decode_content = decode_content
self._body = body if body and isinstance(body, basestring) else None
self._fp = None
self._original_response = original_response
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in [301, 302, 303, 307]:
return self.headers.get('location')
return False
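    # Illustrative sketch (not part of the original module): callers can use
    # the three-way return value to tell "follow this location", "redirect
    # status but no Location header" and "not a redirect" apart. ``response``
    # and ``follow`` below are hypothetical.
    #
    #   location = response.get_redirect_location()
    #   if location:              # truthy string -> follow the redirect
    #       follow(location)
    #   elif location is None:    # redirect status, but no Location header
    #       raise HTTPError("Redirect response without a Location header")
    #   else:                     # False -> not a redirect, use the body
    #       body = response.data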
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, decoding and caching
is skipped because we can't decode partial content nor does it make
sense to cache partial content as the full response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header. (Overridden if ``amt`` is set.)
:param cache_content:
If True, will save the returned data such that the same result is
            returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
content_encoding = self.headers.get('content-encoding')
decoder = self.CONTENT_DECODERS.get(content_encoding)
if decode_content is None:
decode_content = self._decode_content
if self._fp is None:
return
try:
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
else:
return self._fp.read(amt)
try:
if decode_content and decoder:
data = decoder(data)
except IOError:
raise HTTPError("Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding)
if cache_content:
self._body = data
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
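    # Illustrative sketch (not part of the original module), showing the two
    # modes of read() on two hypothetical, independent responses:
    #
    #   chunk = resp_a.read(amt=1024)            # raw bytes; decoding and
    #                                            # caching are both skipped
    #   body = resp_b.read(cache_content=True)   # full body, decoded when
    #                                            # enabled, and cached, so
    #   assert resp_b.data == body               # .data keeps working later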
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
return ResponseCls(body=r,
# In Python 3, the header keys are returned capitalised
headers=dict((k.lower(), v) for k,v in r.getheaders()),
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
|
mozilla/mwc
|
vendor-local/packages/requests/requests/packages/urllib3/response.py
|
Python
|
mpl-2.0
| 6,422
|
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
import clv_insured_card
|
CLVsol/odoo_addons
|
clv_medicament_dispensation_mng/clv_insured_card/__init__.py
|
Python
|
agpl-3.0
| 1,428
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from nova.scheduler import filters
from nova.scheduler.filters import extra_specs_ops
LOG = logging.getLogger(__name__)
class ComputeCapabilitiesFilter(filters.BaseHostFilter):
"""HostFilter hard-coded to work with InstanceType records."""
# Instance type and host capabilities do not change within a request
run_filter_once_per_request = True
def _get_capabilities(self, host_state, scope):
cap = host_state
for index in range(0, len(scope)):
try:
if isinstance(cap, six.string_types):
try:
cap = jsonutils.loads(cap)
except ValueError as e:
LOG.debug("%(host_state)s fails. The capabilities "
"'%(cap)s' couldn't be loaded from JSON: "
"%(error)s",
{'host_state': host_state, 'cap': cap,
'error': e})
return None
if not isinstance(cap, dict):
if getattr(cap, scope[index], None) is None:
# If can't find, check stats dict
cap = cap.stats.get(scope[index], None)
else:
cap = getattr(cap, scope[index], None)
else:
cap = cap.get(scope[index], None)
except AttributeError as e:
LOG.debug("%(host_state)s fails. The capabilities couldn't "
"be retrieved: %(error)s.",
{'host_state': host_state, 'error': e})
return None
if cap is None:
LOG.debug("%(host_state)s fails. There are no capabilities "
"to retrieve.",
{'host_state': host_state})
return None
return cap
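    # Illustrative sketch (not part of the original filter): for a spec key
    # such as 'capabilities:cpu_info:features' (a made-up example), the
    # caller strips the 'capabilities' prefix and _get_capabilities walks the
    # remaining scope one element at a time, descending through attributes,
    # the stats dict, or (possibly JSON-encoded) nested dicts:
    #
    #   scope = 'capabilities:cpu_info:features'.split(':')[1:]
    #   # -> ['cpu_info', 'features']; conceptually host_state.cpu_info is
    #   # looked up first, then cpu_info['features'].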
def _satisfies_extra_specs(self, host_state, instance_type):
"""Check that the host_state provided by the compute service
satisfies the extra specs associated with the instance type.
"""
if 'extra_specs' not in instance_type:
return True
for key, req in instance_type.extra_specs.items():
# Either not scope format, or in capabilities scope
scope = key.split(':')
            # If the key does not have a namespace, the scope's size is 1;
            # check whether host_state contains the key as an attribute. If
            # not, ignore it. If it does, treat it the same way as
            # 'capabilities:key'. This is for backward compatibility.
            # If the key has a namespace, the scope's size will be bigger
            # than 1; check whether the namespace is 'capabilities'. If not,
            # ignore it.
if len(scope) == 1:
stats = getattr(host_state, 'stats', {})
has_attr = hasattr(host_state, key) or key in stats
if not has_attr:
continue
else:
if scope[0] != "capabilities":
continue
else:
del scope[0]
cap = self._get_capabilities(host_state, scope)
if cap is None:
return False
if not extra_specs_ops.match(str(cap), req):
LOG.debug("%(host_state)s fails extra_spec requirements. "
"'%(req)s' does not match '%(cap)s'",
{'host_state': host_state, 'req': req,
'cap': cap})
return False
return True
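    # Illustrative sketch (not part of the original filter), using made-up
    # extra specs to show how keys are routed by the loop above:
    #
    #   extra_specs = {'capabilities:hypervisor_type': 'QEMU',  # checked
    #                  'hw:cpu_policy': 'dedicated'}            # ignored:
    #                                                           # wrong scope
    #   # 'capabilities:hypervisor_type' becomes scope ['hypervisor_type'],
    #   # so str(host_state.hypervisor_type) is compared against 'QEMU' via
    #   # extra_specs_ops.match().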
def host_passes(self, host_state, spec_obj):
"""Return a list of hosts that can create instance_type."""
instance_type = spec_obj.flavor
if not self._satisfies_extra_specs(host_state, instance_type):
LOG.debug("%(host_state)s fails instance_type extra_specs "
"requirements", {'host_state': host_state})
return False
return True
|
rajalokan/nova
|
nova/scheduler/filters/compute_capabilities_filter.py
|
Python
|
apache-2.0
| 4,838
|
# (c) Copyright 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Unit tests for Cisco fc zone client cli."""
from mock import patch
from cinder import exception
from cinder.openstack.common import processutils
from cinder import test
from cinder.zonemanager.drivers.cisco.cisco_fc_zone_client_cli \
import CiscoFCZoneClientCLI
import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant
nsshow = '20:1a:00:05:1e:e8:e3:29'
switch_data = ['VSAN 303\n',
'----------------------------------------------------------\n',
'FCID TYPE PWWN (VENDOR) FC4-TYPE:FEATURE\n',
'----------------------------------------------------------\n',
'0x030001 N 20:1a:00:05:1e:e8:e3:29 (Cisco) ipfc\n',
'0x030101 NL 10:00:00:00:77:99:60:2c (Interphase)\n',
'0x030200 NL 10:00:00:49:c9:28:c7:01\n']
cfgactv = ['zoneset name OpenStack_Cfg vsan 303\n',
'zone name openstack50060b0000c26604201900051ee8e329 vsan 303\n',
'pwwn 50:06:0b:00:00:c2:66:04\n',
'pwwn 20:19:00:05:1e:e8:e3:29\n']
active_zoneset = {
'zones': {
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']},
'active_zone_config': 'OpenStack_Cfg'}
zoning_status_data_basic = [
'VSAN: 303 default-zone: deny distribute: active only Interop: default\n',
' mode: basic merge-control: allow\n',
' session: none\n',
' hard-zoning: enabled broadcast: unsupported\n',
' smart-zoning: disabled\n',
' rscn-format: fabric-address\n',
'Default zone:\n',
' qos: none broadcast: unsupported ronly: unsupported\n',
'Full Zoning Database :\n',
' DB size: 220 bytes\n',
' Zonesets:2 Zones:2 Aliases: 0\n',
'Active Zoning Database :\n',
' DB size: 80 bytes\n',
' Name: test-zs-test Zonesets:1 Zones:1\n',
'Status:\n']
zoning_status_basic = {'mode': 'basic', 'session': 'none'}
zoning_status_data_enhanced_nosess = [
'VSAN: 303 default-zone: deny distribute: active only Interop: default\n',
' mode: enhanced merge-control: allow\n',
' session: none\n',
' hard-zoning: enabled broadcast: unsupported\n',
' smart-zoning: disabled\n',
' rscn-format: fabric-address\n',
'Default zone:\n',
' qos: none broadcast: unsupported ronly: unsupported\n',
'Full Zoning Database :\n',
' DB size: 220 bytes\n',
' Zonesets:2 Zones:2 Aliases: 0\n',
'Active Zoning Database :\n',
' DB size: 80 bytes\n',
' Name: test-zs-test Zonesets:1 Zones:1\n',
'Status:\n']
zoning_status_enhanced_nosess = {'mode': 'enhanced', 'session': 'none'}
zoning_status_data_enhanced_sess = [
'VSAN: 303 default-zone: deny distribute: active only Interop: default\n',
' mode: enhanced merge-control: allow\n',
' session: otherthannone\n',
' hard-zoning: enabled broadcast: unsupported\n',
' smart-zoning: disabled\n',
' rscn-format: fabric-address\n',
'Default zone:\n',
' qos: none broadcast: unsupported ronly: unsupported\n',
'Full Zoning Database :\n',
' DB size: 220 bytes\n',
' Zonesets:2 Zones:2 Aliases: 0\n',
'Active Zoning Database :\n',
' DB size: 80 bytes\n',
' Name: test-zs-test Zonesets:1 Zones:1\n',
'Status:\n']
zoning_status_enhanced_sess = {'mode': 'enhanced', 'session': 'otherthannone'}
active_zoneset_multiple_zones = {
'zones': {
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'],
'openstack50060b0000c26602201900051ee8e327':
['50:06:0b:00:00:c2:66:02', '20:19:00:05:1e:e8:e3:27']},
'active_zone_config': 'OpenStack_Cfg'}
new_zone = {'openstack10000012345678902001009876543210':
['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10']}
new_zones = {'openstack10000012345678902001009876543210':
['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10'],
'openstack10000011111111112001001111111111':
['10:00:00:11:11:11:11:11', '20:01:00:11:11:11:11:11']}
zone_names_to_delete = 'openstack50060b0000c26604201900051ee8e329'
class TestCiscoFCZoneClientCLI(CiscoFCZoneClientCLI, test.TestCase):
def setUp(self):
super(TestCiscoFCZoneClientCLI, self).setUp()
self.fabric_vsan = '303'
# override some of the functions
def __init__(self, *args, **kwargs):
test.TestCase.__init__(self, *args, **kwargs)
@patch.object(CiscoFCZoneClientCLI, '_get_switch_info')
def test_get_active_zone_set(self, get_switch_info_mock):
cmd_list = [ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan,
' | no-more']
get_switch_info_mock.return_value = cfgactv
active_zoneset_returned = self.get_active_zone_set()
get_switch_info_mock.assert_called_once_with(cmd_list)
self.assertDictMatch(active_zoneset_returned, active_zoneset)
@patch.object(CiscoFCZoneClientCLI, '_run_ssh')
def test_get_active_zone_set_ssh_error(self, run_ssh_mock):
run_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.CiscoZoningCliException,
self.get_active_zone_set)
@patch.object(CiscoFCZoneClientCLI, '_get_switch_info')
def test_get_zoning_status_basic(self, get_zoning_status_mock):
cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan]
get_zoning_status_mock.return_value = zoning_status_data_basic
zoning_status_returned = self.get_zoning_status()
get_zoning_status_mock.assert_called_once_with(cmd_list)
self.assertDictMatch(zoning_status_returned, zoning_status_basic)
@patch.object(CiscoFCZoneClientCLI, '_get_switch_info')
def test_get_zoning_status_enhanced_nosess(self, get_zoning_status_mock):
cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan]
get_zoning_status_mock.return_value =\
zoning_status_data_enhanced_nosess
zoning_status_returned = self.get_zoning_status()
get_zoning_status_mock.assert_called_once_with(cmd_list)
self.assertDictMatch(zoning_status_returned,
zoning_status_enhanced_nosess)
@patch.object(CiscoFCZoneClientCLI, '_get_switch_info')
def test_get_zoning_status_enhanced_sess(self, get_zoning_status_mock):
cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan]
get_zoning_status_mock.return_value = zoning_status_data_enhanced_sess
zoning_status_returned = self.get_zoning_status()
get_zoning_status_mock.assert_called_once_with(cmd_list)
self.assertDictMatch(zoning_status_returned,
zoning_status_enhanced_sess)
@patch.object(CiscoFCZoneClientCLI, '_get_switch_info')
def test_get_nameserver_info(self, get_switch_info_mock):
ns_info_list = []
ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29']
get_switch_info_mock.return_value = (switch_data)
ns_info_list = self.get_nameserver_info()
self.assertEqual(ns_info_list, ns_info_list_expected)
@patch.object(CiscoFCZoneClientCLI, '_run_ssh')
def test_get_nameserver_info_ssh_error(self, run_ssh_mock):
run_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.CiscoZoningCliException,
self.get_nameserver_info)
@patch.object(CiscoFCZoneClientCLI, '_run_ssh')
def test__cfg_save(self, run_ssh_mock):
cmd_list = ['copy', 'running-config', 'startup-config']
self._cfg_save()
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
@patch.object(CiscoFCZoneClientCLI, '_run_ssh')
def test__get_switch_info(self, run_ssh_mock):
cmd_list = [ZoneConstant.FCNS_SHOW, self.fabric_vsan]
nsshow_list = [nsshow]
run_ssh_mock.return_value = (Stream(nsshow), Stream())
switch_data = self._get_switch_info(cmd_list)
self.assertEqual(switch_data, nsshow_list)
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
def test__parse_ns_output(self):
return_wwn_list = []
expected_wwn_list = ['20:1a:00:05:1e:e8:e3:29']
return_wwn_list = self._parse_ns_output(switch_data)
self.assertEqual(return_wwn_list, expected_wwn_list)
class Channel(object):
def recv_exit_status(self):
return 0
class Stream(object):
def __init__(self, buffer=''):
self.buffer = buffer
self.channel = Channel()
def readlines(self):
return self.buffer
def splitlines(self):
return self.buffer.splitlines()
def close(self):
pass
def flush(self):
self.buffer = ''
|
alex8866/cinder
|
cinder/tests/zonemanager/test_cisco_fc_zone_client_cli.py
|
Python
|
apache-2.0
| 9,495
|
#!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Build tool setup for MacOS.
This module is a SCons tool which should be included in the topmost mac
environment.
It is used as follows:
env = base_env.Clone(tools = ['component_setup'])
mac_env = base_env.Clone(tools = ['target_platform_mac'])
"""
import SCons.Script
def ComponentPlatformSetup(env, builder_name):
"""Hook to allow platform to modify environment inside a component builder.
Args:
env: Environment to modify
builder_name: Name of the builder
"""
if env.get('ENABLE_EXCEPTIONS'):
env.FilterOut(CCFLAGS=['-fno-exceptions'])
env.Append(CCFLAGS=['-fexceptions'])
#------------------------------------------------------------------------------
# TODO: This bundle builder here needs refactoring to use ComponentPackage().
# Until that refactoring, consider this code very experimental (i.e., don't use
# it unless you're ok with it changing out from underneath you).
def BundlePseudoBuilder(env, target, **kwargs):
"""MacOS Bundle PseudoBuilder.
Args:
env: Environment in which to build
target: Name of the bundle to build
kwargs: Additional parameters to set in the environment
Returns:
The target is returned.
"""
# Don't change the environment passed into the pseudo-builder
env = env.Clone()
# Bring keywords args into the environment.
for k, v in kwargs.items():
env[k] = v
# Make sure BUNDLE_RESOURCES is set and not empty; force it to be a list
bundle_resources = env.Flatten(env.get('BUNDLE_RESOURCES', []))
if not bundle_resources:
raise ValueError('BUNDLE_RESOURCES must be set and non-empty')
# Make each resource into a directory node.
# TODO: this seems a little too restrictive.
# bundle_resources = [env.Dir(i) for i in bundle_resources]
bundle_resources = [i for i in bundle_resources]
# Create a PkgInfo file only if BUNDLE_PKGINFO_FILENAME is useful.
# (NPAPI bundles are unhappy with PkgInfo files.)
if env.get('BUNDLE_PKGINFO_FILENAME'):
pkginfo_create_command = ('$BUNDLE_GENERATE_PKGINFO '
'>$TARGET/$BUNDLE_PKGINFO_FILENAME')
else:
pkginfo_create_command = '/bin/echo no PkgInfo will be created' # noop
# Add the build step for the bundle.
p = env.Command(env.Dir(target),
[env.File('$BUNDLE_EXE'),
env.File('$BUNDLE_INFO_PLIST')] +
bundle_resources,
[SCons.Script.Delete('$TARGET'),
SCons.Script.Mkdir('$TARGET/Contents'),
SCons.Script.Mkdir('$TARGET/Contents/MacOS'),
SCons.Script.Mkdir('$TARGET/Contents/Resources'),
'cp -f $SOURCE $TARGET/Contents/MacOS',
'cp -f ${SOURCES[1]} $TARGET/Contents',
pkginfo_create_command,
'cp -rf ${SOURCES[2:]} $TARGET/Contents/Resources'])
# Add an alias for this target.
# This also allows the 'all_bundles' target to build me.
a = env.Alias(target, p)
for group in env['COMPONENT_BUNDLE_GROUPS']:
SCons.Script.Alias(group, a)
return env.Dir(target)
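# Illustrative sketch (not part of the original tool): once generate() has
# registered the pseudo-builder via env.AddMethod, a SConscript might invoke
# it roughly as follows; the target name and resource paths are made up:
#
#   mac_env.Bundle('MyApp.app',
#                  BUNDLE_EXE=my_program,       # a previously built program node
#                  BUNDLE_INFO_PLIST='Info.plist',
#                  BUNDLE_RESOURCES=['English.lproj', 'app.icns'])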
#------------------------------------------------------------------------------
def generate(env):
# NOTE: SCons requires the use of this name, which fails gpylint.
"""SCons entry point for this tool."""
# Preserve some variables that get blown away by the tools.
saved = dict()
for k in ['ASFLAGS', 'CFLAGS', 'CCFLAGS', 'CXXFLAGS', 'LINKFLAGS', 'LIBS']:
saved[k] = env.get(k, [])
env[k] = []
# Use g++
env.Tool('g++')
env.Tool('gcc')
env.Tool('gnulink')
env.Tool('ar')
env.Tool('as')
env.Tool('applelink')
# Set target platform bits
env.SetBits('mac', 'posix')
env.Replace(
TARGET_PLATFORM='MAC',
COMPONENT_PLATFORM_SETUP=ComponentPlatformSetup,
CCFLAG_INCLUDE='-include', # Command line option to include a header
# Code coverage related.
COVERAGE_CCFLAGS=['-ftest-coverage', '-fprofile-arcs', '-DCOVERAGE'],
COVERAGE_LIBS='gcov',
COVERAGE_STOP_CMD=[
'$COVERAGE_MCOV --directory "$TARGET_ROOT" --output "$TARGET"',
('$COVERAGE_GENHTML --output-directory $COVERAGE_HTML_DIR '
'$COVERAGE_OUTPUT_FILE'),
],
# Libraries expect to be in the same directory as their executables.
# This is correct for unit tests, and for libraries which are published
# in Contents/MacOS next to their executables.
DYLIB_INSTALL_NAME_FLAGS=[
'-install_name',
'@loader_path/${TARGET.file}'
],
)
env.Append(
# Mac apps and dylibs have a more strict relationship about where they
# expect to find each other. When an app is linked, it stores the
# relative path from itself to any dylibs it links against. Override
# this so that it will store the relative path from $LIB_DIR instead.
# This is similar to RPATH on Linux.
LINKFLAGS = [
'-Xlinker', '-executable_path',
'-Xlinker', '$LIB_DIR',
],
# Similarly, tell the library where it expects to find itself once it's
# installed.
SHLINKFLAGS = ['$DYLIB_INSTALL_NAME_FLAGS'],
# Settings for debug
CCFLAGS_DEBUG=['-g'],
LINKFLAGS_DEBUG=['-g'],
# Settings for optimized
# Optimized for space by default, which is what Xcode does
CCFLAGS_OPTIMIZED=['-Os'],
# Settings for component_builders
COMPONENT_LIBRARY_LINK_SUFFIXES=['.dylib', '.a'],
COMPONENT_LIBRARY_DEBUG_SUFFIXES=[],
# New 'all' target. Helpful: "hammer -h" now lists it!
COMPONENT_BUNDLE_GROUPS=['all_bundles'],
)
# Set default values used by the Bundle pseudobuilder.
env.Replace(
BUNDLE_TYPE='APPL',
BUNDLE_STRING='${BUNDLE_TYPE}????',
BUNDLE_GENERATE_PKGINFO='echo "${BUNDLE_STRING}"',
BUNDLE_PKGINFO_FILENAME='PkgInfo',
BUNDLE_INFO_PLIST='Info.plist',
)
# Add the Bundle pseudobuilder.
env.AddMethod(BundlePseudoBuilder, 'Bundle')
# Add our target groups
AddTargetGroup('all_bundles', 'bundles can be built')
# Restore saved flags.
env.Append(**saved)
|
plxaye/chromium
|
src/native_client_sdk/src/site_scons/site_tools/target_platform_mac.py
|
Python
|
apache-2.0
| 7,631
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 NTT corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command-line flag library.
Emulates gflags by wrapping cfg.ConfigOpts.
The idea is to move fully to cfg eventually, and this wrapper is a
stepping stone.
"""
import socket
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
from cinder.i18n import _
CONF = cfg.CONF
logging.register_options(CONF)
core_opts = [
cfg.StrOpt('api_paste_config',
default="api-paste.ini",
help='File name for the paste.deploy config for cinder-api'),
cfg.StrOpt('state_path',
default='/var/lib/cinder',
deprecated_name='pybasedir',
help="Top-level directory for maintaining cinder's state"), ]
debug_opts = [
]
CONF.register_cli_opts(core_opts)
CONF.register_cli_opts(debug_opts)
global_opts = [
cfg.StrOpt('my_ip',
default=netutils.get_my_ipv4(),
help='IP address of this host'),
cfg.StrOpt('glance_host',
default='$my_ip',
help='Default glance host name or IP'),
cfg.IntOpt('glance_port',
default=9292,
help='Default glance port'),
cfg.ListOpt('glance_api_servers',
default=['$glance_host:$glance_port'],
help='A list of the glance API servers available to cinder '
'([hostname|ip]:port)'),
cfg.IntOpt('glance_api_version',
default=1,
help='Version of the glance API to use'),
cfg.IntOpt('glance_num_retries',
default=0,
               help='Number of retries when downloading an image from glance'),
cfg.BoolOpt('glance_api_insecure',
default=False,
help='Allow to perform insecure SSL (https) requests to '
'glance'),
cfg.BoolOpt('glance_api_ssl_compression',
default=False,
help='Enables or disables negotiation of SSL layer '
'compression. In some cases disabling compression '
'can improve data throughput, such as when high '
'network bandwidth is available and you use '
'compressed image formats like qcow2.'),
cfg.StrOpt('glance_ca_certificates_file',
help='Location of ca certificates file to use for glance '
'client requests.'),
cfg.IntOpt('glance_request_timeout',
default=None,
help='http/https timeout value for glance operations. If no '
'value (None) is supplied here, the glanceclient default '
'value is used.'),
cfg.StrOpt('scheduler_topic',
default='cinder-scheduler',
help='The topic that scheduler nodes listen on'),
cfg.StrOpt('volume_topic',
default='cinder-volume',
help='The topic that volume nodes listen on'),
cfg.StrOpt('backup_topic',
default='cinder-backup',
help='The topic that volume backup nodes listen on'),
cfg.BoolOpt('enable_v1_api',
default=True,
help=_("DEPRECATED: Deploy v1 of the Cinder API.")),
cfg.BoolOpt('enable_v2_api',
default=True,
help=_("Deploy v2 of the Cinder API.")),
cfg.BoolOpt('api_rate_limit',
default=True,
help='Enables or disables rate limit of the API.'),
cfg.ListOpt('osapi_volume_ext_list',
default=[],
help='Specify list of extensions to load when using osapi_'
'volume_extension option with cinder.api.contrib.'
'select_extensions'),
cfg.MultiStrOpt('osapi_volume_extension',
default=['cinder.api.contrib.standard_extensions'],
help='osapi volume extension to load'),
cfg.StrOpt('volume_manager',
default='cinder.volume.manager.VolumeManager',
help='Full class name for the Manager for volume'),
cfg.StrOpt('backup_manager',
default='cinder.backup.manager.BackupManager',
help='Full class name for the Manager for volume backup'),
cfg.StrOpt('scheduler_manager',
default='cinder.scheduler.manager.SchedulerManager',
help='Full class name for the Manager for scheduler'),
cfg.StrOpt('host',
default=socket.gethostname(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a host name, FQDN, or IP address.'),
# NOTE(vish): default to nova for compatibility with nova installs
cfg.StrOpt('storage_availability_zone',
default='nova',
help='Availability zone of this node'),
cfg.StrOpt('default_availability_zone',
default=None,
help='Default availability zone for new volumes. If not set, '
'the storage_availability_zone option value is used as '
'the default for new volumes.'),
cfg.StrOpt('default_volume_type',
default=None,
help='Default volume type to use'),
cfg.StrOpt('volume_usage_audit_period',
default='month',
help='Time period for which to generate volume usages. '
'The options are hour, day, month, or year.'),
cfg.StrOpt('rootwrap_config',
default='/etc/cinder/rootwrap.conf',
help='Path to the rootwrap configuration file to use for '
'running commands as root'),
cfg.BoolOpt('monkey_patch',
default=False,
help='Enable monkey patching'),
cfg.ListOpt('monkey_patch_modules',
default=[],
help='List of modules/decorators to monkey patch'),
cfg.IntOpt('service_down_time',
default=60,
help='Maximum time since last check-in for a service to be '
'considered up'),
cfg.StrOpt('volume_api_class',
default='cinder.volume.api.API',
help='The full class name of the volume API class to use'),
cfg.StrOpt('backup_api_class',
default='cinder.backup.api.API',
help='The full class name of the volume backup API class'),
cfg.StrOpt('auth_strategy',
default='keystone',
choices=['noauth', 'keystone', 'deprecated'],
help='The strategy to use for auth. Supports noauth, keystone, '
'and deprecated.'),
cfg.ListOpt('enabled_backends',
default=None,
help='A list of backend names to use. These backend names '
'should be backed by a unique [CONFIG] group '
'with its options'),
cfg.BoolOpt('no_snapshot_gb_quota',
default=False,
help='Whether snapshots count against gigabyte quota'),
cfg.StrOpt('transfer_api_class',
default='cinder.transfer.api.API',
help='The full class name of the volume transfer API class'),
cfg.StrOpt('replication_api_class',
default='cinder.replication.api.API',
help='The full class name of the volume replication API class'),
cfg.StrOpt('consistencygroup_api_class',
default='cinder.consistencygroup.api.API',
help='The full class name of the consistencygroup API class'),
cfg.StrOpt('os_privileged_user_name',
default=None,
help='OpenStack privileged account username. Used for requests '
'to other services (such as Nova) that require an account '
'with special rights.'),
cfg.StrOpt('os_privileged_user_password',
default=None,
help='Password associated with the OpenStack privileged '
'account.',
secret=True),
cfg.StrOpt('os_privileged_user_tenant',
default=None,
help='Tenant name associated with the OpenStack privileged '
'account.'),
]
CONF.register_opts(global_opts)
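# Illustrative sketch (not part of the original module): once registered,
# options are read as attributes of CONF, and '$'-style defaults such as
# '$glance_host:$glance_port' are interpolated from the other options, so a
# stock configuration might resolve roughly as follows (values illustrative):
#
#   CONF.glance_port                          # -> 9292
#   CONF.glance_api_servers                   # -> ['<my_ip>:9292']
#   CONF.set_override('glance_port', 9393)    # standard oslo.config override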
|
saeki-masaki/cinder
|
cinder/common/config.py
|
Python
|
apache-2.0
| 9,037
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url
from dispatch.topology import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
]
|
alanconway/dispatch
|
console/dispatch-dashboard/dispatch/topology/urls.py
|
Python
|
apache-2.0
| 693
|